2024-12-12 16:26:44,442 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-12-12 16:26:44,455 main DEBUG Took 0.010514 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-12 16:26:44,455 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-12 16:26:44,455 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-12 16:26:44,456 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-12 16:26:44,457 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-12 16:26:44,464 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-12 16:26:44,475 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-12 16:26:44,476 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-12 16:26:44,477 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-12 16:26:44,477 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-12 16:26:44,478 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-12 16:26:44,478 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-12 16:26:44,479 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-12 16:26:44,479 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-12 16:26:44,479 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-12 16:26:44,480 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-12 16:26:44,480 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-12 16:26:44,481 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-12 16:26:44,481 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-12 16:26:44,481 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-12 16:26:44,482 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-12 16:26:44,482 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-12 16:26:44,482 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-12 16:26:44,483 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-12 16:26:44,483 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-12 16:26:44,483 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-12 16:26:44,484 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-12 16:26:44,484 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-12 16:26:44,484 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-12 16:26:44,485 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-12 16:26:44,485 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-12 16:26:44,485 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-12 16:26:44,487 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-12 16:26:44,488 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-12 16:26:44,490 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-12 16:26:44,490 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-12 16:26:44,491 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-12 16:26:44,492 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-12 16:26:44,500 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-12 16:26:44,502 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-12 16:26:44,504 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-12 16:26:44,504 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-12 16:26:44,505 main DEBUG createAppenders(={Console}) 2024-12-12 16:26:44,506 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 initialized 2024-12-12 16:26:44,506 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-12-12 16:26:44,506 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 OK. 2024-12-12 16:26:44,507 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-12 16:26:44,507 main DEBUG OutputStream closed 2024-12-12 16:26:44,507 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-12 16:26:44,507 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-12 16:26:44,508 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@54e1c68b OK 2024-12-12 16:26:44,574 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-12 16:26:44,576 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-12 16:26:44,577 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-12 16:26:44,578 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-12 16:26:44,578 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-12 16:26:44,579 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-12 16:26:44,579 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-12 16:26:44,579 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-12 16:26:44,580 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-12 16:26:44,580 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-12 16:26:44,580 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-12 16:26:44,580 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-12 16:26:44,581 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-12 16:26:44,581 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-12 16:26:44,581 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-12 16:26:44,582 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-12 16:26:44,582 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-12 16:26:44,583 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-12 16:26:44,585 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-12 16:26:44,585 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@7dda48d9) with optional ClassLoader: null 2024-12-12 16:26:44,585 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-12 16:26:44,586 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@7dda48d9] started OK. 2024-12-12T16:26:44,886 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/139f82cf-8370-d53f-65db-268928d5e290 2024-12-12 16:26:44,889 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-12 16:26:44,890 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
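For reference, the configuration the status lines above describe (a Console appender on SYSTEM_ERR with pattern %d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n, a root logger at INFO referencing Console, and per-package level overrides) can be approximated programmatically. The sketch below is illustrative only: it uses Log4j's standard Console appender instead of the custom HBaseTestAppender the test jar registers, and shows just a few of the logger overrides listed above.

```java
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.config.Configurator;
import org.apache.logging.log4j.core.config.builder.api.AppenderComponentBuilder;
import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilder;
import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilderFactory;
import org.apache.logging.log4j.core.config.builder.impl.BuiltConfiguration;

public final class TestLoggingSketch {
  public static void main(String[] args) {
    ConfigurationBuilder<BuiltConfiguration> builder =
        ConfigurationBuilderFactory.newConfigurationBuilder();

    // Console appender writing to stderr with the same pattern seen in the status output.
    AppenderComponentBuilder console = builder.newAppender("Console", "Console")
        .addAttribute("target", "SYSTEM_ERR")
        .add(builder.newLayout("PatternLayout")
            .addAttribute("pattern", "%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n"));
    builder.add(console);

    // Root logger at INFO routed to the Console appender, matching "levelAndRefs=INFO,Console".
    builder.add(builder.newRootLogger(Level.INFO)
        .add(builder.newAppenderRef("Console")));

    // A few of the per-package overrides listed by the LoggerConfig builders above.
    builder.add(builder.newLogger("org.apache.hadoop", Level.WARN));
    builder.add(builder.newLogger("org.apache.hadoop.hbase", Level.DEBUG));
    builder.add(builder.newLogger("org.apache.zookeeper", Level.ERROR));

    Configurator.initialize(builder.build());
  }
}
```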
2024-12-12T16:26:44,899 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.TestAcidGuaranteesWithAdaptivePolicy timeout: 13 mins 2024-12-12T16:26:44,924 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-12T16:26:44,927 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/139f82cf-8370-d53f-65db-268928d5e290/cluster_c3fbcf95-de6c-01d4-3e6c-8a7f0857b2b0, deleteOnExit=true 2024-12-12T16:26:44,927 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-12T16:26:44,928 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/139f82cf-8370-d53f-65db-268928d5e290/test.cache.data in system properties and HBase conf 2024-12-12T16:26:44,928 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/139f82cf-8370-d53f-65db-268928d5e290/hadoop.tmp.dir in system properties and HBase conf 2024-12-12T16:26:44,929 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/139f82cf-8370-d53f-65db-268928d5e290/hadoop.log.dir in system properties and HBase conf 2024-12-12T16:26:44,929 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/139f82cf-8370-d53f-65db-268928d5e290/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-12T16:26:44,930 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/139f82cf-8370-d53f-65db-268928d5e290/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-12T16:26:44,930 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-12T16:26:45,032 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-12T16:26:45,126 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-12T16:26:45,129 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/139f82cf-8370-d53f-65db-268928d5e290/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-12T16:26:45,130 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/139f82cf-8370-d53f-65db-268928d5e290/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-12T16:26:45,130 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/139f82cf-8370-d53f-65db-268928d5e290/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-12T16:26:45,131 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/139f82cf-8370-d53f-65db-268928d5e290/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-12T16:26:45,131 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/139f82cf-8370-d53f-65db-268928d5e290/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-12T16:26:45,132 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/139f82cf-8370-d53f-65db-268928d5e290/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-12T16:26:45,132 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/139f82cf-8370-d53f-65db-268928d5e290/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-12T16:26:45,133 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/139f82cf-8370-d53f-65db-268928d5e290/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-12T16:26:45,133 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/139f82cf-8370-d53f-65db-268928d5e290/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-12T16:26:45,133 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/139f82cf-8370-d53f-65db-268928d5e290/nfs.dump.dir in system properties and HBase conf 2024-12-12T16:26:45,134 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/139f82cf-8370-d53f-65db-268928d5e290/java.io.tmpdir in system properties and HBase conf 2024-12-12T16:26:45,134 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/139f82cf-8370-d53f-65db-268928d5e290/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-12T16:26:45,134 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/139f82cf-8370-d53f-65db-268928d5e290/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-12T16:26:45,135 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/139f82cf-8370-d53f-65db-268928d5e290/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-12T16:26:46,024 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-12T16:26:46,100 INFO [Time-limited test {}] log.Log(170): Logging initialized @2501ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-12T16:26:46,183 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-12T16:26:46,255 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-12T16:26:46,277 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-12T16:26:46,277 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-12T16:26:46,278 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-12T16:26:46,291 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-12T16:26:46,294 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/139f82cf-8370-d53f-65db-268928d5e290/hadoop.log.dir/,AVAILABLE} 2024-12-12T16:26:46,295 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-12T16:26:46,526 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@b03fcff{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/139f82cf-8370-d53f-65db-268928d5e290/java.io.tmpdir/jetty-localhost-45221-hadoop-hdfs-3_4_1-tests_jar-_-any-16030180570174541239/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-12T16:26:46,533 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:45221} 2024-12-12T16:26:46,533 INFO [Time-limited test {}] server.Server(415): Started @2935ms 2024-12-12T16:26:46,939 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-12T16:26:46,947 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-12T16:26:46,948 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-12T16:26:46,948 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-12T16:26:46,949 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-12T16:26:46,950 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47db50b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/139f82cf-8370-d53f-65db-268928d5e290/hadoop.log.dir/,AVAILABLE} 2024-12-12T16:26:46,950 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4727fac8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-12T16:26:47,079 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1f79ec76{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/139f82cf-8370-d53f-65db-268928d5e290/java.io.tmpdir/jetty-localhost-37997-hadoop-hdfs-3_4_1-tests_jar-_-any-8360160096454836916/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-12T16:26:47,080 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@576ebda6{HTTP/1.1, (http/1.1)}{localhost:37997} 2024-12-12T16:26:47,080 INFO [Time-limited test {}] server.Server(415): Started @3483ms 2024-12-12T16:26:47,135 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-12T16:26:47,616 WARN [Thread-72 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/139f82cf-8370-d53f-65db-268928d5e290/cluster_c3fbcf95-de6c-01d4-3e6c-8a7f0857b2b0/dfs/data/data1/current/BP-1654502203-172.17.0.2-1734020805757/current, will proceed with Du for space computation calculation, 2024-12-12T16:26:47,616 WARN [Thread-73 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/139f82cf-8370-d53f-65db-268928d5e290/cluster_c3fbcf95-de6c-01d4-3e6c-8a7f0857b2b0/dfs/data/data2/current/BP-1654502203-172.17.0.2-1734020805757/current, will proceed with Du for space computation calculation, 2024-12-12T16:26:47,657 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-12T16:26:47,724 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4e206ece7aece6e5 with lease ID 0x100200ed4c9010b4: Processing first storage report for DS-d055ea8e-e901-4dd4-b420-da3b57f4f437 from datanode DatanodeRegistration(127.0.0.1:43013, datanodeUuid=ab0ae3d6-289e-4771-88b7-f95f718f4912, infoPort=35401, infoSecurePort=0, ipcPort=44275, storageInfo=lv=-57;cid=testClusterID;nsid=187958621;c=1734020805757) 2024-12-12T16:26:47,725 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4e206ece7aece6e5 with lease ID 0x100200ed4c9010b4: from storage DS-d055ea8e-e901-4dd4-b420-da3b57f4f437 node DatanodeRegistration(127.0.0.1:43013, datanodeUuid=ab0ae3d6-289e-4771-88b7-f95f718f4912, infoPort=35401, infoSecurePort=0, ipcPort=44275, storageInfo=lv=-57;cid=testClusterID;nsid=187958621;c=1734020805757), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-12T16:26:47,726 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4e206ece7aece6e5 with lease ID 0x100200ed4c9010b4: Processing first storage report for DS-b4aac7e8-f27e-4262-9e14-66621282ca4b from datanode DatanodeRegistration(127.0.0.1:43013, datanodeUuid=ab0ae3d6-289e-4771-88b7-f95f718f4912, infoPort=35401, infoSecurePort=0, ipcPort=44275, storageInfo=lv=-57;cid=testClusterID;nsid=187958621;c=1734020805757) 2024-12-12T16:26:47,726 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4e206ece7aece6e5 with lease ID 0x100200ed4c9010b4: from storage DS-b4aac7e8-f27e-4262-9e14-66621282ca4b node DatanodeRegistration(127.0.0.1:43013, datanodeUuid=ab0ae3d6-289e-4771-88b7-f95f718f4912, infoPort=35401, infoSecurePort=0, ipcPort=44275, storageInfo=lv=-57;cid=testClusterID;nsid=187958621;c=1734020805757), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-12T16:26:47,812 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/139f82cf-8370-d53f-65db-268928d5e290 
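The StartMiniClusterOption printed earlier (numMasters=1, numRegionServers=1, numDataNodes=1, numZkServers=1) is the single-node topology a test such as TestAcidGuaranteesWithAdaptivePolicy brings up through HBaseTestingUtility. A minimal sketch of that startup sequence, assuming the HBase 2.x testing API; the class name and the placeholder test body are hypothetical:

```java
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public final class MiniClusterStartupSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();

    // Mirrors the options logged above: one master, one region server,
    // one HDFS data node and one ZooKeeper server.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(1)
        .numZkServers(1)
        .build();

    util.startMiniCluster(option);
    try {
      // ... run test logic against util.getConnection() ...
    } finally {
      util.shutdownMiniCluster();
    }
  }
}
```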
2024-12-12T16:26:47,891 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/139f82cf-8370-d53f-65db-268928d5e290/cluster_c3fbcf95-de6c-01d4-3e6c-8a7f0857b2b0/zookeeper_0, clientPort=52684, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/139f82cf-8370-d53f-65db-268928d5e290/cluster_c3fbcf95-de6c-01d4-3e6c-8a7f0857b2b0/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/139f82cf-8370-d53f-65db-268928d5e290/cluster_c3fbcf95-de6c-01d4-3e6c-8a7f0857b2b0/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-12T16:26:47,902 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=52684 2024-12-12T16:26:47,919 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-12T16:26:47,923 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-12T16:26:48,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741825_1001 (size=7) 2024-12-12T16:26:48,592 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc with version=8 2024-12-12T16:26:48,593 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/hbase-staging 2024-12-12T16:26:48,724 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-12T16:26:48,996 INFO [Time-limited test {}] client.ConnectionUtils(129): master/4f6a4780a2f6:0 server-side Connection retries=45 2024-12-12T16:26:49,016 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-12T16:26:49,016 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-12T16:26:49,017 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-12T16:26:49,017 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-12T16:26:49,017 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-12T16:26:49,160 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating 
org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-12T16:26:49,220 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-12T16:26:49,233 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-12T16:26:49,238 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-12T16:26:49,265 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 11546 (auto-detected) 2024-12-12T16:26:49,266 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-12T16:26:49,284 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:33187 2024-12-12T16:26:49,293 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-12T16:26:49,297 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-12T16:26:49,315 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:33187 connecting to ZooKeeper ensemble=127.0.0.1:52684 2024-12-12T16:26:49,355 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:331870x0, quorum=127.0.0.1:52684, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-12T16:26:49,358 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33187-0x100870d75a70000 connected 2024-12-12T16:26:49,390 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33187-0x100870d75a70000, quorum=127.0.0.1:52684, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-12T16:26:49,393 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33187-0x100870d75a70000, quorum=127.0.0.1:52684, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-12T16:26:49,396 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33187-0x100870d75a70000, quorum=127.0.0.1:52684, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-12T16:26:49,400 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33187 2024-12-12T16:26:49,401 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33187 2024-12-12T16:26:49,401 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33187 2024-12-12T16:26:49,402 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33187 2024-12-12T16:26:49,402 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33187 
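The RecoverableZooKeeper/ZKWatcher lines above show the master session registering watchers on /hbase/master, /hbase/running and /hbase/acl against the mini ensemble at 127.0.0.1:52684. A minimal sketch of inspecting those znodes with the plain ZooKeeper client, assuming the ensemble address and base znode printed above (the client port differs between runs):

```java
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public final class ZkInspectSketch {
  public static void main(String[] args) throws Exception {
    // Ensemble as printed in the log; the mini cluster picks a new port each run.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:52684", 30000, event -> { });
    try {
      Stat master = zk.exists("/hbase/master", false);
      System.out.println("/hbase/master " + (master == null ? "absent" : "present"));
      Stat running = zk.exists("/hbase/running", false);
      System.out.println("/hbase/running " + (running == null ? "absent" : "present"));
    } finally {
      zk.close();
    }
  }
}
```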
2024-12-12T16:26:49,410 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc, hbase.cluster.distributed=false 2024-12-12T16:26:49,477 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/4f6a4780a2f6:0 server-side Connection retries=45 2024-12-12T16:26:49,477 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-12T16:26:49,477 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-12T16:26:49,477 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-12T16:26:49,477 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-12T16:26:49,478 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-12T16:26:49,480 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-12T16:26:49,482 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-12T16:26:49,483 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:41933 2024-12-12T16:26:49,485 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-12T16:26:49,490 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-12T16:26:49,492 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-12T16:26:49,495 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-12T16:26:49,498 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:41933 connecting to ZooKeeper ensemble=127.0.0.1:52684 2024-12-12T16:26:49,502 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:419330x0, quorum=127.0.0.1:52684, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-12T16:26:49,503 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41933-0x100870d75a70001 connected 2024-12-12T16:26:49,503 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41933-0x100870d75a70001, quorum=127.0.0.1:52684, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-12T16:26:49,505 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41933-0x100870d75a70001, 
quorum=127.0.0.1:52684, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-12T16:26:49,506 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41933-0x100870d75a70001, quorum=127.0.0.1:52684, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-12T16:26:49,507 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41933 2024-12-12T16:26:49,508 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41933 2024-12-12T16:26:49,508 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41933 2024-12-12T16:26:49,509 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41933 2024-12-12T16:26:49,509 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41933 2024-12-12T16:26:49,511 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/4f6a4780a2f6,33187,1734020808717 2024-12-12T16:26:49,518 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41933-0x100870d75a70001, quorum=127.0.0.1:52684, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-12T16:26:49,518 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33187-0x100870d75a70000, quorum=127.0.0.1:52684, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-12T16:26:49,520 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33187-0x100870d75a70000, quorum=127.0.0.1:52684, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/4f6a4780a2f6,33187,1734020808717 2024-12-12T16:26:49,528 DEBUG [M:0;4f6a4780a2f6:33187 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;4f6a4780a2f6:33187 2024-12-12T16:26:49,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41933-0x100870d75a70001, quorum=127.0.0.1:52684, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-12T16:26:49,542 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33187-0x100870d75a70000, quorum=127.0.0.1:52684, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-12T16:26:49,542 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41933-0x100870d75a70001, quorum=127.0.0.1:52684, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T16:26:49,542 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33187-0x100870d75a70000, quorum=127.0.0.1:52684, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T16:26:49,543 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33187-0x100870d75a70000, quorum=127.0.0.1:52684, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-12T16:26:49,544 DEBUG [zk-event-processor-pool-0 
{}] zookeeper.ZKUtil(111): master:33187-0x100870d75a70000, quorum=127.0.0.1:52684, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-12T16:26:49,544 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/4f6a4780a2f6,33187,1734020808717 from backup master directory 2024-12-12T16:26:49,547 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33187-0x100870d75a70000, quorum=127.0.0.1:52684, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/4f6a4780a2f6,33187,1734020808717 2024-12-12T16:26:49,547 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41933-0x100870d75a70001, quorum=127.0.0.1:52684, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-12T16:26:49,547 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33187-0x100870d75a70000, quorum=127.0.0.1:52684, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-12T16:26:49,548 WARN [master/4f6a4780a2f6:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-12T16:26:49,548 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=4f6a4780a2f6,33187,1734020808717 2024-12-12T16:26:49,550 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-12T16:26:49,552 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-12T16:26:49,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741826_1002 (size=42) 2024-12-12T16:26:50,023 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/hbase.id with ID: b9ede192-d803-478e-859f-39c19f39e6b5 2024-12-12T16:26:50,065 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-12T16:26:50,096 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33187-0x100870d75a70000, quorum=127.0.0.1:52684, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T16:26:50,096 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41933-0x100870d75a70001, quorum=127.0.0.1:52684, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T16:26:50,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741827_1003 (size=196) 2024-12-12T16:26:50,536 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, 
{NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T16:26:50,539 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-12T16:26:50,557 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:232) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:207) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:26:50,561 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-12T16:26:50,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741828_1004 (size=1189) 2024-12-12T16:26:51,012 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/MasterData/data/master/store 2024-12-12T16:26:51,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741829_1005 (size=34) 2024-12-12T16:26:51,433 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
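The 'master:store' table definition logged above (an in-memory 'info' family with three versions, ROWCOL bloom filter, ROW_INDEX_V1 encoding and 8 KB blocks, plus single-version 'proc', 'rs' and 'state' families) maps onto HBase's public descriptor builders. A minimal sketch covering the 'info' and 'proc' families, assuming the 2.x client API; the real descriptor is assembled internally by MasterRegionFactory:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public final class MasterStoreDescriptorSketch {
  public static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
        // 'info': 3 versions, in-memory, ROWCOL bloom, ROW_INDEX_V1 encoding, 8 KB blocks.
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBlocksize(8 * 1024)
            .build())
        // 'proc': single version, ROW bloom, default 64 KB blocks.
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)
            .setBloomFilterType(BloomType.ROW)
            .build())
        .build();
  }
}
```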
2024-12-12T16:26:51,434 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T16:26:51,435 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-12T16:26:51,435 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-12T16:26:51,435 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-12T16:26:51,435 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-12T16:26:51,435 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-12T16:26:51,435 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-12T16:26:51,436 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-12T16:26:51,438 WARN [master/4f6a4780a2f6:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/MasterData/data/master/store/.initializing 2024-12-12T16:26:51,438 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/MasterData/WALs/4f6a4780a2f6,33187,1734020808717 2024-12-12T16:26:51,444 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-12T16:26:51,455 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=4f6a4780a2f6%2C33187%2C1734020808717, suffix=, logDir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/MasterData/WALs/4f6a4780a2f6,33187,1734020808717, archiveDir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/MasterData/oldWALs, maxLogs=10 2024-12-12T16:26:51,477 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/MasterData/WALs/4f6a4780a2f6,33187,1734020808717/4f6a4780a2f6%2C33187%2C1734020808717.1734020811460, exclude list is [], retry=0 2024-12-12T16:26:51,494 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43013,DS-d055ea8e-e901-4dd4-b420-da3b57f4f437,DISK] 2024-12-12T16:26:51,497 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 
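The WALFactory entry above instantiates AsyncFSWALProvider, which is what the FanOutOneBlockAsyncDFSOutput classes in the surrounding entries belong to. In a site or test configuration the provider is normally chosen through hbase.wal.provider; a minimal sketch, assuming the standard configuration key and values:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class WalProviderSketch {
  public static Configuration walConf() {
    Configuration conf = HBaseConfiguration.create();
    // "asyncfs" maps to AsyncFSWALProvider, the provider instantiated in the log above;
    // "filesystem" would select the classic FSHLog-based provider instead.
    conf.set("hbase.wal.provider", "asyncfs");
    return conf;
  }
}
```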
2024-12-12T16:26:51,536 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/MasterData/WALs/4f6a4780a2f6,33187,1734020808717/4f6a4780a2f6%2C33187%2C1734020808717.1734020811460 2024-12-12T16:26:51,537 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35401:35401)] 2024-12-12T16:26:51,538 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-12T16:26:51,538 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T16:26:51,543 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-12T16:26:51,544 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-12T16:26:51,587 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-12T16:26:51,613 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-12T16:26:51,617 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:26:51,620 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T16:26:51,620 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-12T16:26:51,624 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-12T16:26:51,624 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:26:51,625 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T16:26:51,625 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-12T16:26:51,628 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-12T16:26:51,628 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:26:51,629 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T16:26:51,629 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-12T16:26:51,632 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-12T16:26:51,632 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:26:51,633 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T16:26:51,638 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-12T16:26:51,639 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-12T16:26:51,648 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-12T16:26:51,652 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-12T16:26:51,657 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T16:26:51,658 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61920844, jitterRate=-0.07730752229690552}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-12T16:26:51,662 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-12T16:26:51,663 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-12T16:26:51,694 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c429f60, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:26:51,729 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 
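Editor's note (sketch): the "(32.0 M)" fallback logged by FlushLargeStoresPolicy above is just the region's memstore flush size divided by its family count. With the 128 MB flushSize reported by MasterRegionFlusherAndCompactor and the four families opened for the master store (info, proc, rs, state), that reproduces the flushSizeLowerBound=33554432 shown when the region opens. A minimal arithmetic check:

  public class PerFamilyFlushBoundCheck {
    public static void main(String[] args) {
      // hbase.hregion.percolumnfamilyflush.size.lower.bound is unset in this
      // run, so the policy falls back to flushSize / familyCount.
      long memStoreFlushSize = 134_217_728L; // 128 MB, from the flusher constructor line above
      int familyCount = 4;                   // info, proc, rs, state
      System.out.println(memStoreFlushSize / familyCount); // 33554432 (32 MB)
    }
  }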
2024-12-12T16:26:51,741 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-12T16:26:51,741 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-12T16:26:51,743 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-12T16:26:51,745 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 1 msec 2024-12-12T16:26:51,749 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 4 msec 2024-12-12T16:26:51,750 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-12T16:26:51,775 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-12T16:26:51,786 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33187-0x100870d75a70000, quorum=127.0.0.1:52684, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-12T16:26:51,789 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-12T16:26:51,791 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-12T16:26:51,792 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33187-0x100870d75a70000, quorum=127.0.0.1:52684, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-12T16:26:51,794 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-12T16:26:51,796 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-12T16:26:51,800 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33187-0x100870d75a70000, quorum=127.0.0.1:52684, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-12T16:26:51,801 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-12T16:26:51,802 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33187-0x100870d75a70000, quorum=127.0.0.1:52684, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-12T16:26:51,804 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-12T16:26:51,815 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33187-0x100870d75a70000, quorum=127.0.0.1:52684, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-12T16:26:51,817 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-12T16:26:51,820 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41933-0x100870d75a70001, quorum=127.0.0.1:52684, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-12T16:26:51,820 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33187-0x100870d75a70000, quorum=127.0.0.1:52684, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-12T16:26:51,820 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41933-0x100870d75a70001, quorum=127.0.0.1:52684, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T16:26:51,821 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33187-0x100870d75a70000, quorum=127.0.0.1:52684, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T16:26:51,821 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=4f6a4780a2f6,33187,1734020808717, sessionid=0x100870d75a70000, setting cluster-up flag (Was=false) 2024-12-12T16:26:51,834 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41933-0x100870d75a70001, quorum=127.0.0.1:52684, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T16:26:51,834 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33187-0x100870d75a70000, quorum=127.0.0.1:52684, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T16:26:51,840 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-12T16:26:51,842 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=4f6a4780a2f6,33187,1734020808717 2024-12-12T16:26:51,847 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33187-0x100870d75a70000, quorum=127.0.0.1:52684, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T16:26:51,847 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41933-0x100870d75a70001, quorum=127.0.0.1:52684, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T16:26:51,853 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-12T16:26:51,854 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=4f6a4780a2f6,33187,1734020808717 2024-12-12T16:26:51,929 DEBUG [RS:0;4f6a4780a2f6:41933 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;4f6a4780a2f6:41933 2024-12-12T16:26:51,931 INFO 
[RS:0;4f6a4780a2f6:41933 {}] regionserver.HRegionServer(1008): ClusterId : b9ede192-d803-478e-859f-39c19f39e6b5 2024-12-12T16:26:51,934 DEBUG [RS:0;4f6a4780a2f6:41933 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-12T16:26:51,935 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-12-12T16:26:51,939 DEBUG [RS:0;4f6a4780a2f6:41933 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-12T16:26:51,939 DEBUG [RS:0;4f6a4780a2f6:41933 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-12T16:26:51,941 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-12T16:26:51,942 DEBUG [RS:0;4f6a4780a2f6:41933 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-12T16:26:51,943 DEBUG [RS:0;4f6a4780a2f6:41933 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@898b849, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:26:51,944 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-12T16:26:51,944 DEBUG [RS:0;4f6a4780a2f6:41933 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d821eb7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=4f6a4780a2f6/172.17.0.2:0 2024-12-12T16:26:51,947 INFO [RS:0;4f6a4780a2f6:41933 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-12T16:26:51,947 INFO [RS:0;4f6a4780a2f6:41933 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-12T16:26:51,947 DEBUG [RS:0;4f6a4780a2f6:41933 {}] regionserver.HRegionServer(1090): About to register with Master. 
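Editor's note (sketch): the StochasticLoadBalancer line above echoes maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800 and maxRunningTime=30000. The snippet assumes the usual hbase.master.balancer.stochastic.* keys (plus hbase.master.loadbalancer.class) are what those values map to; it only shows where such numbers would be pinned, not what this test actually set.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;

  public class BalancerConfigSketch {
    public static void main(String[] args) {
      // Assumed keys for the values echoed by StochasticLoadBalancer above.
      Configuration conf = HBaseConfiguration.create();
      conf.set("hbase.master.loadbalancer.class",
          "org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer");
      conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1_000_000);
      conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
      conf.setInt("hbase.master.balancer.stochastic.maxRunningTime", 30_000);
      conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
      System.out.println(conf.get("hbase.master.loadbalancer.class"));
    }
  }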
2024-12-12T16:26:51,950 INFO [RS:0;4f6a4780a2f6:41933 {}] regionserver.HRegionServer(3073): reportForDuty to master=4f6a4780a2f6,33187,1734020808717 with isa=4f6a4780a2f6/172.17.0.2:41933, startcode=1734020809476 2024-12-12T16:26:51,949 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 4f6a4780a2f6,33187,1734020808717 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-12T16:26:51,953 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/4f6a4780a2f6:0, corePoolSize=5, maxPoolSize=5 2024-12-12T16:26:51,953 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/4f6a4780a2f6:0, corePoolSize=5, maxPoolSize=5 2024-12-12T16:26:51,953 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/4f6a4780a2f6:0, corePoolSize=5, maxPoolSize=5 2024-12-12T16:26:51,953 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/4f6a4780a2f6:0, corePoolSize=5, maxPoolSize=5 2024-12-12T16:26:51,953 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/4f6a4780a2f6:0, corePoolSize=10, maxPoolSize=10 2024-12-12T16:26:51,954 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/4f6a4780a2f6:0, corePoolSize=1, maxPoolSize=1 2024-12-12T16:26:51,954 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/4f6a4780a2f6:0, corePoolSize=2, maxPoolSize=2 2024-12-12T16:26:51,954 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/4f6a4780a2f6:0, corePoolSize=1, maxPoolSize=1 2024-12-12T16:26:51,955 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1734020841955 2024-12-12T16:26:51,957 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-12T16:26:51,959 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-12T16:26:51,960 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-12T16:26:51,960 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-12T16:26:51,963 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-12T16:26:51,963 DEBUG [RS:0;4f6a4780a2f6:41933 {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=RegionServerStatusService, sasl=false 2024-12-12T16:26:51,964 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-12T16:26:51,964 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-12T16:26:51,964 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-12T16:26:51,966 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:26:51,966 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-12T16:26:51,966 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-12T16:26:51,968 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-12T16:26:51,969 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-12T16:26:51,970 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-12T16:26:51,972 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-12T16:26:51,973 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-12T16:26:51,974 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/4f6a4780a2f6:0:becomeActiveMaster-HFileCleaner.large.0-1734020811974,5,FailOnTimeoutGroup] 2024-12-12T16:26:51,975 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small 
files=Thread[master/4f6a4780a2f6:0:becomeActiveMaster-HFileCleaner.small.0-1734020811974,5,FailOnTimeoutGroup] 2024-12-12T16:26:51,975 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-12T16:26:51,975 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-12T16:26:51,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741831_1007 (size=1039) 2024-12-12T16:26:51,976 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-12T16:26:51,977 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-12T16:26:52,000 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51933, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-12T16:26:52,005 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33187 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:52,008 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33187 {}] master.ServerManager(486): Registering regionserver=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:52,025 DEBUG [RS:0;4f6a4780a2f6:41933 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc 2024-12-12T16:26:52,025 DEBUG [RS:0;4f6a4780a2f6:41933 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:45065 2024-12-12T16:26:52,025 DEBUG [RS:0;4f6a4780a2f6:41933 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-12T16:26:52,029 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33187-0x100870d75a70000, quorum=127.0.0.1:52684, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-12T16:26:52,029 DEBUG [RS:0;4f6a4780a2f6:41933 {}] zookeeper.ZKUtil(111): regionserver:41933-0x100870d75a70001, quorum=127.0.0.1:52684, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:52,030 WARN [RS:0;4f6a4780a2f6:41933 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
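Editor's note (sketch): the hbase:meta descriptor created by FSTableDescriptors above (families info, rep_barrier and table with ROW_INDEX_V1 encoding, ROWCOL blooms, IN_MEMORY, 8 KB/64 KB block sizes and the MultiRowMutationEndpoint coprocessor) can be mirrored with the public TableDescriptorBuilder API. The table name demo_meta_like below is made up, and this is not how InitMetaProcedure assembles the real meta descriptor; it only restates the same family settings for orientation.

  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
  import org.apache.hadoop.hbase.client.TableDescriptor;
  import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
  import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
  import org.apache.hadoop.hbase.regionserver.BloomType;
  import org.apache.hadoop.hbase.util.Bytes;

  public class MetaLikeDescriptorSketch {
    public static void main(String[] args) throws Exception {
      // Hypothetical table name; settings copied from the descriptor logged above.
      TableDescriptor td = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("demo_meta_like"))
          .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(3).setInMemory(true).setBlocksize(8 * 1024)
              .setBloomFilterType(BloomType.ROWCOL)
              .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1).build())
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("rep_barrier"))
              .setMaxVersions(Integer.MAX_VALUE).setInMemory(true).setBlocksize(64 * 1024)
              .setBloomFilterType(BloomType.ROWCOL)
              .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1).build())
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("table"))
              .setMaxVersions(3).setInMemory(true).setBlocksize(8 * 1024)
              .setBloomFilterType(BloomType.ROWCOL)
              .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1).build())
          .build();
      System.out.println(td);
    }
  }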
2024-12-12T16:26:52,030 INFO [RS:0;4f6a4780a2f6:41933 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-12T16:26:52,030 DEBUG [RS:0;4f6a4780a2f6:41933 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/WALs/4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:52,032 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [4f6a4780a2f6,41933,1734020809476] 2024-12-12T16:26:52,043 DEBUG [RS:0;4f6a4780a2f6:41933 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-12T16:26:52,056 INFO [RS:0;4f6a4780a2f6:41933 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-12T16:26:52,069 INFO [RS:0;4f6a4780a2f6:41933 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-12T16:26:52,072 INFO [RS:0;4f6a4780a2f6:41933 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-12T16:26:52,072 INFO [RS:0;4f6a4780a2f6:41933 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-12T16:26:52,073 INFO [RS:0;4f6a4780a2f6:41933 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-12T16:26:52,080 INFO [RS:0;4f6a4780a2f6:41933 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
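Editor's note (sketch): the PressureAwareCompactionThroughputController line above (higher bound 100 MB/s, lower bound 50 MB/s, tuning period 60000 ms) and the MemStoreFlusher global limit are configuration-driven. The keys named below (hbase.hstore.compaction.throughput.higher.bound / lower.bound and hbase.regionserver.global.memstore.size) are assumptions about which knobs those figures correspond to; the 880 M global limit itself follows from the test JVM heap, which the log does not show.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;

  public class ThroughputLimitsSketch {
    public static void main(String[] args) {
      // Assumed keys behind the throughput and memstore limits logged above.
      Configuration conf = HBaseConfiguration.create();
      conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024); // 100 MB/s
      conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);   // 50 MB/s
      conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f); // heap fraction behind the global limit
      System.out.println(conf.getLong("hbase.hstore.compaction.throughput.higher.bound", 0));
    }
  }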
2024-12-12T16:26:52,080 DEBUG [RS:0;4f6a4780a2f6:41933 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/4f6a4780a2f6:0, corePoolSize=1, maxPoolSize=1 2024-12-12T16:26:52,080 DEBUG [RS:0;4f6a4780a2f6:41933 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/4f6a4780a2f6:0, corePoolSize=1, maxPoolSize=1 2024-12-12T16:26:52,080 DEBUG [RS:0;4f6a4780a2f6:41933 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/4f6a4780a2f6:0, corePoolSize=1, maxPoolSize=1 2024-12-12T16:26:52,081 DEBUG [RS:0;4f6a4780a2f6:41933 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0, corePoolSize=1, maxPoolSize=1 2024-12-12T16:26:52,081 DEBUG [RS:0;4f6a4780a2f6:41933 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/4f6a4780a2f6:0, corePoolSize=1, maxPoolSize=1 2024-12-12T16:26:52,081 DEBUG [RS:0;4f6a4780a2f6:41933 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/4f6a4780a2f6:0, corePoolSize=2, maxPoolSize=2 2024-12-12T16:26:52,081 DEBUG [RS:0;4f6a4780a2f6:41933 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/4f6a4780a2f6:0, corePoolSize=1, maxPoolSize=1 2024-12-12T16:26:52,081 DEBUG [RS:0;4f6a4780a2f6:41933 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/4f6a4780a2f6:0, corePoolSize=1, maxPoolSize=1 2024-12-12T16:26:52,081 DEBUG [RS:0;4f6a4780a2f6:41933 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/4f6a4780a2f6:0, corePoolSize=1, maxPoolSize=1 2024-12-12T16:26:52,081 DEBUG [RS:0;4f6a4780a2f6:41933 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/4f6a4780a2f6:0, corePoolSize=1, maxPoolSize=1 2024-12-12T16:26:52,081 DEBUG [RS:0;4f6a4780a2f6:41933 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/4f6a4780a2f6:0, corePoolSize=1, maxPoolSize=1 2024-12-12T16:26:52,082 DEBUG [RS:0;4f6a4780a2f6:41933 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/4f6a4780a2f6:0, corePoolSize=3, maxPoolSize=3 2024-12-12T16:26:52,082 DEBUG [RS:0;4f6a4780a2f6:41933 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0, corePoolSize=3, maxPoolSize=3 2024-12-12T16:26:52,082 INFO [RS:0;4f6a4780a2f6:41933 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-12T16:26:52,083 INFO [RS:0;4f6a4780a2f6:41933 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-12T16:26:52,083 INFO [RS:0;4f6a4780a2f6:41933 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-12T16:26:52,083 INFO [RS:0;4f6a4780a2f6:41933 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-12T16:26:52,083 INFO [RS:0;4f6a4780a2f6:41933 {}] hbase.ChoreService(168): Chore ScheduledChore name=4f6a4780a2f6,41933,1734020809476-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-12T16:26:52,113 INFO [RS:0;4f6a4780a2f6:41933 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-12T16:26:52,116 INFO [RS:0;4f6a4780a2f6:41933 {}] hbase.ChoreService(168): Chore ScheduledChore name=4f6a4780a2f6,41933,1734020809476-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-12T16:26:52,137 INFO [RS:0;4f6a4780a2f6:41933 {}] regionserver.Replication(204): 4f6a4780a2f6,41933,1734020809476 started 2024-12-12T16:26:52,138 INFO [RS:0;4f6a4780a2f6:41933 {}] regionserver.HRegionServer(1767): Serving as 4f6a4780a2f6,41933,1734020809476, RpcServer on 4f6a4780a2f6/172.17.0.2:41933, sessionid=0x100870d75a70001 2024-12-12T16:26:52,138 DEBUG [RS:0;4f6a4780a2f6:41933 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-12T16:26:52,138 DEBUG [RS:0;4f6a4780a2f6:41933 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:52,138 DEBUG [RS:0;4f6a4780a2f6:41933 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '4f6a4780a2f6,41933,1734020809476' 2024-12-12T16:26:52,138 DEBUG [RS:0;4f6a4780a2f6:41933 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-12T16:26:52,139 DEBUG [RS:0;4f6a4780a2f6:41933 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-12T16:26:52,140 DEBUG [RS:0;4f6a4780a2f6:41933 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-12T16:26:52,140 DEBUG [RS:0;4f6a4780a2f6:41933 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-12T16:26:52,140 DEBUG [RS:0;4f6a4780a2f6:41933 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:52,140 DEBUG [RS:0;4f6a4780a2f6:41933 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '4f6a4780a2f6,41933,1734020809476' 2024-12-12T16:26:52,140 DEBUG [RS:0;4f6a4780a2f6:41933 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-12T16:26:52,141 DEBUG [RS:0;4f6a4780a2f6:41933 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-12T16:26:52,141 DEBUG [RS:0;4f6a4780a2f6:41933 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-12T16:26:52,142 INFO [RS:0;4f6a4780a2f6:41933 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-12T16:26:52,142 INFO [RS:0;4f6a4780a2f6:41933 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
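Editor's note (sketch): both quota managers report "Quota support disabled" above. If quota support were wanted in a run like this, the usual switch is assumed to be hbase.quota.enabled (space quotas additionally depend on it); shown only as a pointer, not as something this test configures.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;

  public class QuotaToggleSketch {
    public static void main(String[] args) {
      // Assumed key; left at the default (false) it produces the two
      // "Quota support disabled" lines seen above.
      Configuration conf = HBaseConfiguration.create();
      conf.setBoolean("hbase.quota.enabled", true);
      System.out.println(conf.getBoolean("hbase.quota.enabled", false));
    }
  }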
2024-12-12T16:26:52,247 INFO [RS:0;4f6a4780a2f6:41933 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-12T16:26:52,251 INFO [RS:0;4f6a4780a2f6:41933 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=4f6a4780a2f6%2C41933%2C1734020809476, suffix=, logDir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/WALs/4f6a4780a2f6,41933,1734020809476, archiveDir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/oldWALs, maxLogs=32 2024-12-12T16:26:52,269 DEBUG [RS:0;4f6a4780a2f6:41933 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/WALs/4f6a4780a2f6,41933,1734020809476/4f6a4780a2f6%2C41933%2C1734020809476.1734020812254, exclude list is [], retry=0 2024-12-12T16:26:52,274 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43013,DS-d055ea8e-e901-4dd4-b420-da3b57f4f437,DISK] 2024-12-12T16:26:52,277 INFO [RS:0;4f6a4780a2f6:41933 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/WALs/4f6a4780a2f6,41933,1734020809476/4f6a4780a2f6%2C41933%2C1734020809476.1734020812254 2024-12-12T16:26:52,278 DEBUG [RS:0;4f6a4780a2f6:41933 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35401:35401)] 2024-12-12T16:26:52,378 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-12T16:26:52,379 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc 2024-12-12T16:26:52,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741833_1009 (size=32) 2024-12-12T16:26:52,791 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): 
Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T16:26:52,794 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-12T16:26:52,796 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-12T16:26:52,797 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:26:52,798 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T16:26:52,798 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-12T16:26:52,800 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-12T16:26:52,800 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:26:52,801 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T16:26:52,801 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family table of region 1588230740 2024-12-12T16:26:52,804 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-12T16:26:52,804 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:26:52,805 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T16:26:52,806 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/hbase/meta/1588230740 2024-12-12T16:26:52,807 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/hbase/meta/1588230740 2024-12-12T16:26:52,810 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
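Editor's note (sketch): the assignment steps that follow publish the meta location to the /hbase/meta-region-server znode, and the ZKWatcher lines show the resulting NodeCreated/NodeDataChanged notifications. The snippet below illustrates that watch-and-notify pattern with the plain ZooKeeper client against the quorum address from the log; HBase itself goes through ZKWatcher/MetaTableLocator, so this is an analogy only, and the connect string is simply whatever this test's quorum happened to be.

  import org.apache.zookeeper.WatchedEvent;
  import org.apache.zookeeper.Watcher;
  import org.apache.zookeeper.ZooKeeper;

  public class MetaZnodeWatchSketch {
    public static void main(String[] args) throws Exception {
      // Prints each watch event, mirroring the ZKWatcher(609) lines in the log.
      Watcher watcher = new Watcher() {
        @Override
        public void process(WatchedEvent event) {
          System.out.println(event.getType() + " " + event.getPath());
        }
      };
      ZooKeeper zk = new ZooKeeper("127.0.0.1:52684", 30_000, watcher);
      // Registers a one-shot watch; a later create/setData on the znode fires the callback.
      zk.exists("/hbase/meta-region-server", true);
      Thread.sleep(5_000);
      zk.close();
    }
  }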
2024-12-12T16:26:52,813 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-12T16:26:52,818 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T16:26:52,819 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73589312, jitterRate=0.09656620025634766}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-12T16:26:52,821 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-12T16:26:52,821 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-12T16:26:52,821 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-12T16:26:52,821 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-12T16:26:52,821 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-12T16:26:52,821 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-12T16:26:52,823 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-12T16:26:52,823 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-12T16:26:52,825 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-12T16:26:52,826 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-12T16:26:52,831 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-12T16:26:52,840 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-12T16:26:52,842 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-12T16:26:52,994 DEBUG [4f6a4780a2f6:33187 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-12T16:26:52,999 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:53,004 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 4f6a4780a2f6,41933,1734020809476, state=OPENING 2024-12-12T16:26:53,010 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-12T16:26:53,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41933-0x100870d75a70001, quorum=127.0.0.1:52684, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T16:26:53,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33187-0x100870d75a70000, quorum=127.0.0.1:52684, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T16:26:53,013 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-12T16:26:53,013 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-12T16:26:53,015 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=4f6a4780a2f6,41933,1734020809476}] 2024-12-12T16:26:53,189 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:53,191 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-12T16:26:53,194 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35672, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-12T16:26:53,205 INFO [RS_OPEN_META-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-12T16:26:53,206 INFO [RS_OPEN_META-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-12T16:26:53,206 INFO [RS_OPEN_META-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-12T16:26:53,210 INFO [RS_OPEN_META-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=4f6a4780a2f6%2C41933%2C1734020809476.meta, suffix=.meta, logDir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/WALs/4f6a4780a2f6,41933,1734020809476, archiveDir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/oldWALs, maxLogs=32 2024-12-12T16:26:53,227 DEBUG [RS_OPEN_META-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/WALs/4f6a4780a2f6,41933,1734020809476/4f6a4780a2f6%2C41933%2C1734020809476.meta.1734020813212.meta, exclude list is [], retry=0 2024-12-12T16:26:53,231 DEBUG [RS-EventLoopGroup-3-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43013,DS-d055ea8e-e901-4dd4-b420-da3b57f4f437,DISK] 2024-12-12T16:26:53,234 INFO [RS_OPEN_META-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/WALs/4f6a4780a2f6,41933,1734020809476/4f6a4780a2f6%2C41933%2C1734020809476.meta.1734020813212.meta 2024-12-12T16:26:53,234 DEBUG [RS_OPEN_META-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with 
pipeline: [(127.0.0.1/127.0.0.1:35401:35401)] 2024-12-12T16:26:53,235 DEBUG [RS_OPEN_META-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-12T16:26:53,236 DEBUG [RS_OPEN_META-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-12T16:26:53,296 DEBUG [RS_OPEN_META-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-12T16:26:53,301 INFO [RS_OPEN_META-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-12T16:26:53,305 DEBUG [RS_OPEN_META-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-12T16:26:53,305 DEBUG [RS_OPEN_META-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T16:26:53,306 DEBUG [RS_OPEN_META-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-12T16:26:53,306 DEBUG [RS_OPEN_META-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-12T16:26:53,309 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-12T16:26:53,311 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-12T16:26:53,311 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:26:53,312 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T16:26:53,312 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-12T16:26:53,314 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-12T16:26:53,314 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:26:53,315 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T16:26:53,315 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-12T16:26:53,317 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-12T16:26:53,317 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:26:53,318 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T16:26:53,319 DEBUG [RS_OPEN_META-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/hbase/meta/1588230740 2024-12-12T16:26:53,322 DEBUG [RS_OPEN_META-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/hbase/meta/1588230740 2024-12-12T16:26:53,324 DEBUG [RS_OPEN_META-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-12T16:26:53,327 DEBUG [RS_OPEN_META-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-12T16:26:53,329 INFO [RS_OPEN_META-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72637104, jitterRate=0.08237719535827637}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-12T16:26:53,330 DEBUG [RS_OPEN_META-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-12T16:26:53,338 INFO [RS_OPEN_META-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1734020813183 2024-12-12T16:26:53,349 DEBUG [RS_OPEN_META-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-12T16:26:53,349 INFO [RS_OPEN_META-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-12T16:26:53,351 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:53,352 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 4f6a4780a2f6,41933,1734020809476, state=OPEN 2024-12-12T16:26:53,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33187-0x100870d75a70000, quorum=127.0.0.1:52684, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-12T16:26:53,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41933-0x100870d75a70001, quorum=127.0.0.1:52684, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-12T16:26:53,357 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-12T16:26:53,357 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-12T16:26:53,361 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-12T16:26:53,362 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=4f6a4780a2f6,41933,1734020809476 in 343 msec 2024-12-12T16:26:53,368 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-12T16:26:53,368 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure 
table=hbase:meta, region=1588230740, ASSIGN in 532 msec 2024-12-12T16:26:53,373 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.4840 sec 2024-12-12T16:26:53,373 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1734020813373, completionTime=-1 2024-12-12T16:26:53,373 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-12T16:26:53,373 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-12T16:26:53,410 DEBUG [hconnection-0x526f2908-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:26:53,414 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35676, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:26:53,425 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-12-12T16:26:53,425 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1734020873425 2024-12-12T16:26:53,425 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1734020933425 2024-12-12T16:26:53,425 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 51 msec 2024-12-12T16:26:53,455 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4f6a4780a2f6,33187,1734020808717-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-12T16:26:53,456 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4f6a4780a2f6,33187,1734020808717-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-12T16:26:53,456 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4f6a4780a2f6,33187,1734020808717-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-12T16:26:53,458 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-4f6a4780a2f6:33187, period=300000, unit=MILLISECONDS is enabled. 2024-12-12T16:26:53,458 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-12T16:26:53,463 DEBUG [master/4f6a4780a2f6:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-12T16:26:53,466 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
2024-12-12T16:26:53,467 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-12T16:26:53,473 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-12T16:26:53,476 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T16:26:53,478 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:26:53,479 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T16:26:53,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741835_1011 (size=358) 2024-12-12T16:26:53,896 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => f489058ee189c52324fddaaf21558958, NAME => 'hbase:namespace,,1734020813467.f489058ee189c52324fddaaf21558958.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc 2024-12-12T16:26:53,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741836_1012 (size=42) 2024-12-12T16:26:53,906 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1734020813467.f489058ee189c52324fddaaf21558958.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T16:26:53,906 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing f489058ee189c52324fddaaf21558958, disabling compactions & flushes 2024-12-12T16:26:53,906 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1734020813467.f489058ee189c52324fddaaf21558958. 2024-12-12T16:26:53,907 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1734020813467.f489058ee189c52324fddaaf21558958. 2024-12-12T16:26:53,907 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1734020813467.f489058ee189c52324fddaaf21558958. 
after waiting 0 ms 2024-12-12T16:26:53,907 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1734020813467.f489058ee189c52324fddaaf21558958. 2024-12-12T16:26:53,907 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1734020813467.f489058ee189c52324fddaaf21558958. 2024-12-12T16:26:53,907 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for f489058ee189c52324fddaaf21558958: 2024-12-12T16:26:53,909 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T16:26:53,916 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1734020813467.f489058ee189c52324fddaaf21558958.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1734020813910"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734020813910"}]},"ts":"1734020813910"} 2024-12-12T16:26:53,939 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-12T16:26:53,941 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T16:26:53,944 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734020813942"}]},"ts":"1734020813942"} 2024-12-12T16:26:53,949 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-12T16:26:53,956 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=f489058ee189c52324fddaaf21558958, ASSIGN}] 2024-12-12T16:26:53,959 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=f489058ee189c52324fddaaf21558958, ASSIGN 2024-12-12T16:26:53,961 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=f489058ee189c52324fddaaf21558958, ASSIGN; state=OFFLINE, location=4f6a4780a2f6,41933,1734020809476; forceNewPlan=false, retain=false 2024-12-12T16:26:54,112 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=f489058ee189c52324fddaaf21558958, regionState=OPENING, regionLocation=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:54,116 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure f489058ee189c52324fddaaf21558958, server=4f6a4780a2f6,41933,1734020809476}] 2024-12-12T16:26:54,270 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:54,277 INFO [RS_OPEN_PRIORITY_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open 
hbase:namespace,,1734020813467.f489058ee189c52324fddaaf21558958. 2024-12-12T16:26:54,277 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => f489058ee189c52324fddaaf21558958, NAME => 'hbase:namespace,,1734020813467.f489058ee189c52324fddaaf21558958.', STARTKEY => '', ENDKEY => ''} 2024-12-12T16:26:54,278 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace f489058ee189c52324fddaaf21558958 2024-12-12T16:26:54,278 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1734020813467.f489058ee189c52324fddaaf21558958.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T16:26:54,278 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for f489058ee189c52324fddaaf21558958 2024-12-12T16:26:54,278 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for f489058ee189c52324fddaaf21558958 2024-12-12T16:26:54,281 INFO [StoreOpener-f489058ee189c52324fddaaf21558958-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region f489058ee189c52324fddaaf21558958 2024-12-12T16:26:54,283 INFO [StoreOpener-f489058ee189c52324fddaaf21558958-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f489058ee189c52324fddaaf21558958 columnFamilyName info 2024-12-12T16:26:54,283 DEBUG [StoreOpener-f489058ee189c52324fddaaf21558958-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:26:54,284 INFO [StoreOpener-f489058ee189c52324fddaaf21558958-1 {}] regionserver.HStore(327): Store=f489058ee189c52324fddaaf21558958/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T16:26:54,285 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/hbase/namespace/f489058ee189c52324fddaaf21558958 2024-12-12T16:26:54,286 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/hbase/namespace/f489058ee189c52324fddaaf21558958 2024-12-12T16:26:54,290 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for f489058ee189c52324fddaaf21558958 2024-12-12T16:26:54,293 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/hbase/namespace/f489058ee189c52324fddaaf21558958/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T16:26:54,294 INFO [RS_OPEN_PRIORITY_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened f489058ee189c52324fddaaf21558958; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65026283, jitterRate=-0.031032875180244446}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T16:26:54,296 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for f489058ee189c52324fddaaf21558958: 2024-12-12T16:26:54,298 INFO [RS_OPEN_PRIORITY_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1734020813467.f489058ee189c52324fddaaf21558958., pid=6, masterSystemTime=1734020814270 2024-12-12T16:26:54,301 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1734020813467.f489058ee189c52324fddaaf21558958. 2024-12-12T16:26:54,301 INFO [RS_OPEN_PRIORITY_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1734020813467.f489058ee189c52324fddaaf21558958. 
2024-12-12T16:26:54,302 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=f489058ee189c52324fddaaf21558958, regionState=OPEN, openSeqNum=2, regionLocation=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:54,309 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-12T16:26:54,309 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure f489058ee189c52324fddaaf21558958, server=4f6a4780a2f6,41933,1734020809476 in 189 msec 2024-12-12T16:26:54,313 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-12T16:26:54,313 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=f489058ee189c52324fddaaf21558958, ASSIGN in 353 msec 2024-12-12T16:26:54,315 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T16:26:54,315 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734020814315"}]},"ts":"1734020814315"} 2024-12-12T16:26:54,318 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-12T16:26:54,322 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T16:26:54,324 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 854 msec 2024-12-12T16:26:54,377 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:33187-0x100870d75a70000, quorum=127.0.0.1:52684, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-12T16:26:54,379 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33187-0x100870d75a70000, quorum=127.0.0.1:52684, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-12T16:26:54,379 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41933-0x100870d75a70001, quorum=127.0.0.1:52684, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T16:26:54,379 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33187-0x100870d75a70000, quorum=127.0.0.1:52684, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T16:26:54,413 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-12T16:26:54,429 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33187-0x100870d75a70000, quorum=127.0.0.1:52684, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-12T16:26:54,435 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 26 msec 2024-12-12T16:26:54,448 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-12T16:26:54,460 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33187-0x100870d75a70000, quorum=127.0.0.1:52684, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-12T16:26:54,466 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 17 msec 2024-12-12T16:26:54,474 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33187-0x100870d75a70000, quorum=127.0.0.1:52684, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-12T16:26:54,478 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33187-0x100870d75a70000, quorum=127.0.0.1:52684, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-12T16:26:54,478 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 4.930sec 2024-12-12T16:26:54,479 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-12T16:26:54,481 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-12T16:26:54,482 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-12T16:26:54,482 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-12T16:26:54,482 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-12T16:26:54,483 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4f6a4780a2f6,33187,1734020808717-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-12T16:26:54,484 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4f6a4780a2f6,33187,1734020808717-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-12T16:26:54,490 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-12T16:26:54,491 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-12T16:26:54,491 INFO [master/4f6a4780a2f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=4f6a4780a2f6,33187,1734020808717-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-12T16:26:54,535 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5e83c466 to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@39dee83f 2024-12-12T16:26:54,536 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-12-12T16:26:54,543 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@67b8b597, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:26:54,547 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-12T16:26:54,547 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-12T16:26:54,556 DEBUG [hconnection-0x4c09ef46-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:26:54,568 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35692, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:26:54,578 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=4f6a4780a2f6,33187,1734020808717 2024-12-12T16:26:54,594 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=219, OpenFileDescriptor=444, MaxFileDescriptor=1048576, SystemLoadAverage=148, ProcessCount=11, AvailableMemoryMB=8447 2024-12-12T16:26:54,605 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-12T16:26:54,608 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45076, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-12T16:26:54,636 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
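The entries above mark the end of startup: the master reports it has completed initialization, HBaseTestingUtility reports the minicluster is up, and TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity begins (note also the WARN that ZKConnectionRegistry is deprecated; the linked book section covers the RPC-based connection registry). For orientation only, a single-node minicluster of this kind is normally driven from test code along the following lines; this is a hedged sketch, not the actual test source.

    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        // Starts an in-process HDFS, ZooKeeper, HBase master and one region server,
        // i.e. the pieces whose startup the log above records.
        HBaseTestingUtility util = new HBaseTestingUtility();
        util.startMiniCluster(1);
        try {
          Connection conn = util.getConnection(); // shared connection to the minicluster
          Admin admin = conn.getAdmin();
          // ... create tables and run test logic here ...
          admin.close();
        } finally {
          util.shutdownMiniCluster(); // tears the in-process cluster down again
        }
      }
    }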
2024-12-12T16:26:54,641 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T16:26:54,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-12T16:26:54,647 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T16:26:54,647 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:26:54,647 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 9 2024-12-12T16:26:54,650 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T16:26:54,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-12T16:26:54,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741837_1013 (size=963) 2024-12-12T16:26:54,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-12T16:26:54,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-12T16:26:55,078 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc 2024-12-12T16:26:55,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741838_1014 (size=53) 2024-12-12T16:26:55,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-12T16:26:55,489 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T16:26:55,489 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing b22602467dd4e6c94f26649b7855f8e8, disabling compactions & flushes 2024-12-12T16:26:55,489 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:26:55,489 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:26:55,489 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. after waiting 0 ms 2024-12-12T16:26:55,489 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:26:55,489 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 
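The create-table request logged earlier (master.HMaster$4(2389)) spells out the schema being built here: table TestAcidGuarantees with the table attribute hbase.hregion.compacting.memstore.type=ADAPTIVE and three column families A, B and C, each with VERSIONS=1 and BLOOMFILTER=ROW. A client-side descriptor matching that schema could be assembled roughly as below; this is a reconstruction from the log, not the test's own code, and the tiny memstore flush size is an assumption that mirrors the earlier TableDescriptorChecker WARN about MEMSTORE_FLUSHSIZE (131072).

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    final class TestTableSchemaSketch {
      static void createTestTable(Admin admin) throws IOException {
        TableDescriptorBuilder builder = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestAcidGuarantees"))
            // Table attribute from the log: adaptive in-memory compaction for the memstores.
            .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE")
            // Assumption: a deliberately small flush size, matching the 131072-byte value
            // the TableDescriptorChecker warned about, presumably to force frequent flushes.
            .setMemStoreFlushSize(131072L);
        for (String family : new String[] { "A", "B", "C" }) {
          builder.setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes(family))
              .setMaxVersions(1)                 // VERSIONS => '1'
              .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
              .build());
        }
        admin.createTable(builder.build());
      }
    }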
2024-12-12T16:26:55,489 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:26:55,491 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T16:26:55,491 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1734020815491"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734020815491"}]},"ts":"1734020815491"} 2024-12-12T16:26:55,494 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-12T16:26:55,496 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T16:26:55,496 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734020815496"}]},"ts":"1734020815496"} 2024-12-12T16:26:55,499 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-12T16:26:55,503 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=b22602467dd4e6c94f26649b7855f8e8, ASSIGN}] 2024-12-12T16:26:55,505 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=b22602467dd4e6c94f26649b7855f8e8, ASSIGN 2024-12-12T16:26:55,506 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=b22602467dd4e6c94f26649b7855f8e8, ASSIGN; state=OFFLINE, location=4f6a4780a2f6,41933,1734020809476; forceNewPlan=false, retain=false 2024-12-12T16:26:55,657 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=b22602467dd4e6c94f26649b7855f8e8, regionState=OPENING, regionLocation=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:55,660 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476}] 2024-12-12T16:26:55,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-12T16:26:55,814 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:55,821 INFO [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 
2024-12-12T16:26:55,821 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} 2024-12-12T16:26:55,822 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees b22602467dd4e6c94f26649b7855f8e8 2024-12-12T16:26:55,822 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T16:26:55,822 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for b22602467dd4e6c94f26649b7855f8e8 2024-12-12T16:26:55,822 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for b22602467dd4e6c94f26649b7855f8e8 2024-12-12T16:26:55,824 INFO [StoreOpener-b22602467dd4e6c94f26649b7855f8e8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region b22602467dd4e6c94f26649b7855f8e8 2024-12-12T16:26:55,828 INFO [StoreOpener-b22602467dd4e6c94f26649b7855f8e8-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T16:26:55,829 INFO [StoreOpener-b22602467dd4e6c94f26649b7855f8e8-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b22602467dd4e6c94f26649b7855f8e8 columnFamilyName A 2024-12-12T16:26:55,829 DEBUG [StoreOpener-b22602467dd4e6c94f26649b7855f8e8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:26:55,830 INFO [StoreOpener-b22602467dd4e6c94f26649b7855f8e8-1 {}] regionserver.HStore(327): Store=b22602467dd4e6c94f26649b7855f8e8/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T16:26:55,830 INFO [StoreOpener-b22602467dd4e6c94f26649b7855f8e8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region b22602467dd4e6c94f26649b7855f8e8 2024-12-12T16:26:55,833 INFO [StoreOpener-b22602467dd4e6c94f26649b7855f8e8-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T16:26:55,833 INFO [StoreOpener-b22602467dd4e6c94f26649b7855f8e8-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b22602467dd4e6c94f26649b7855f8e8 columnFamilyName B 2024-12-12T16:26:55,833 DEBUG [StoreOpener-b22602467dd4e6c94f26649b7855f8e8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:26:55,834 INFO [StoreOpener-b22602467dd4e6c94f26649b7855f8e8-1 {}] regionserver.HStore(327): Store=b22602467dd4e6c94f26649b7855f8e8/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T16:26:55,834 INFO [StoreOpener-b22602467dd4e6c94f26649b7855f8e8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region b22602467dd4e6c94f26649b7855f8e8 2024-12-12T16:26:55,837 INFO [StoreOpener-b22602467dd4e6c94f26649b7855f8e8-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T16:26:55,837 INFO [StoreOpener-b22602467dd4e6c94f26649b7855f8e8-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b22602467dd4e6c94f26649b7855f8e8 columnFamilyName C 2024-12-12T16:26:55,837 DEBUG [StoreOpener-b22602467dd4e6c94f26649b7855f8e8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:26:55,838 INFO [StoreOpener-b22602467dd4e6c94f26649b7855f8e8-1 {}] regionserver.HStore(327): Store=b22602467dd4e6c94f26649b7855f8e8/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T16:26:55,839 INFO [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:26:55,840 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8 2024-12-12T16:26:55,841 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8 2024-12-12T16:26:55,844 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-12T16:26:55,846 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for b22602467dd4e6c94f26649b7855f8e8 2024-12-12T16:26:55,850 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T16:26:55,850 INFO [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened b22602467dd4e6c94f26649b7855f8e8; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74936364, jitterRate=0.11663883924484253}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-12T16:26:55,851 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:26:55,853 INFO [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8., pid=11, masterSystemTime=1734020815814 2024-12-12T16:26:55,856 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:26:55,856 INFO [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 
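At this point the TestAcidGuarantees region is open and the writers of testMixedAtomicity can start loading the A, B and C families; the 3/3-column-family flush a little further down is a consequence of that load. The test name suggests it exercises HBase's row-level atomicity guarantee, under which a single Put that touches several families of one row is applied atomically. An illustrative multi-family Put (row key, qualifier and value are hypothetical, not taken from the test) would look like this:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    final class MultiFamilyPutSketch {
      static void writeAcrossFamilies(Connection conn) throws IOException {
        try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("row-0")); // hypothetical row key
          for (String family : new String[] { "A", "B", "C" }) {
            // One row, one Put, three families: readers should observe all of these
            // cells or none of them.
            put.addColumn(Bytes.toBytes(family), Bytes.toBytes("q0"), Bytes.toBytes("v0"));
          }
          table.put(put);
        }
      }
    }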
2024-12-12T16:26:55,857 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=b22602467dd4e6c94f26649b7855f8e8, regionState=OPEN, openSeqNum=2, regionLocation=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:55,864 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-12T16:26:55,864 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 in 200 msec 2024-12-12T16:26:55,868 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-12T16:26:55,868 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=b22602467dd4e6c94f26649b7855f8e8, ASSIGN in 361 msec 2024-12-12T16:26:55,869 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T16:26:55,869 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734020815869"}]},"ts":"1734020815869"} 2024-12-12T16:26:55,872 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-12T16:26:55,876 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T16:26:55,879 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2340 sec 2024-12-12T16:26:56,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-12T16:26:56,774 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 9 completed 2024-12-12T16:26:56,778 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0e98ea32 to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3b9fcedf 2024-12-12T16:26:56,782 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3e71e468, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:26:56,785 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:26:56,787 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35694, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:26:56,791 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-12T16:26:56,793 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45084, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-12T16:26:56,800 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x12885408 to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@9bd0964 2024-12-12T16:26:56,804 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c63ae4e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:26:56,806 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x72b32f98 to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1324ee83 2024-12-12T16:26:56,810 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@736f1673, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:26:56,811 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x04977266 to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@45b55c24 2024-12-12T16:26:56,814 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ee2166f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:26:56,815 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6bbb5d8a to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@48068a5 2024-12-12T16:26:56,819 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f34ff67, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:26:56,820 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x18603bb9 to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3883f7b 2024-12-12T16:26:56,823 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b5f27aa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:26:56,825 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x72e97e4b to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@12a1285d 2024-12-12T16:26:56,829 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c3b736e, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:26:56,830 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x490457fd to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@527c6d40 2024-12-12T16:26:56,834 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@353bc462, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:26:56,836 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2c8de680 to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@47fe2fa7 2024-12-12T16:26:56,841 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6502d571, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:26:56,842 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6f6b07e3 to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@595e9ebe 2024-12-12T16:26:56,846 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a0471b9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:26:56,853 DEBUG [hconnection-0x4c21a880-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:26:56,858 DEBUG [hconnection-0x7da3ac9b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:26:56,858 DEBUG [hconnection-0x4ee426b5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:26:56,858 DEBUG [hconnection-0x2522abb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:26:56,859 DEBUG [hconnection-0x1c8a5105-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:26:56,859 DEBUG [hconnection-0x62bc4cf2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:26:56,859 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T16:26:56,860 DEBUG [hconnection-0x6c4136b4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:26:56,861 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35706, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
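The master entry just above (master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees) records a client-requested flush of the whole table, which the master executes as the FlushTableProcedure (pid=12) and its FlushRegionProcedure child (pid=13) seen below. A minimal client-side sketch of such a request, assuming an open Admin handle:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    final class FlushRequestSketch {
      // Ask the cluster to flush every memstore of the table; the work itself runs
      // server-side as the flush procedures visible in the surrounding log.
      static void flushTestTable(Admin admin) throws IOException {
        admin.flush(TableName.valueOf("TestAcidGuarantees"));
      }
    }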
2024-12-12T16:26:56,862 DEBUG [hconnection-0x79bb3058-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:26:56,863 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35722, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:26:56,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees 2024-12-12T16:26:56,870 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35734, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:26:56,870 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T16:26:56,871 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35742, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:26:56,872 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T16:26:56,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-12T16:26:56,874 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T16:26:56,889 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35756, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:26:56,890 DEBUG [hconnection-0x40590c2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:26:56,893 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35762, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:26:56,901 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35766, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:26:56,917 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35780, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:26:56,923 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35768, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:26:56,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on b22602467dd4e6c94f26649b7855f8e8 2024-12-12T16:26:56,966 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b22602467dd4e6c94f26649b7855f8e8 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-12T16:26:56,976 
DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=A 2024-12-12T16:26:56,976 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:26:56,977 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=B 2024-12-12T16:26:56,977 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:26:56,977 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=C 2024-12-12T16:26:56,977 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:26:56,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-12T16:26:57,071 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:57,072 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T16:26:57,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:26:57,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. as already flushing 2024-12-12T16:26:57,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:26:57,092 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:26:57,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:26:57,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:26:57,118 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/46a10f78d1664e318fc3d1dc777d4d3f is 50, key is test_row_0/A:col10/1734020816915/Put/seqid=0 2024-12-12T16:26:57,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741839_1015 (size=9657) 2024-12-12T16:26:57,175 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/46a10f78d1664e318fc3d1dc777d4d3f 2024-12-12T16:26:57,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-12T16:26:57,194 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:57,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020877170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:57,194 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:57,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35734 deadline: 1734020877175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:57,199 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:57,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35722 deadline: 1734020877182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:57,208 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:57,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35766 deadline: 1734020877196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:57,210 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:57,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35756 deadline: 1734020877196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:57,258 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:57,260 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T16:26:57,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:26:57,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. as already flushing 2024-12-12T16:26:57,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:26:57,266 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:26:57,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:26:57,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:26:57,296 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/8e746b81d1b040848e4459870142b3e0 is 50, key is test_row_0/B:col10/1734020816915/Put/seqid=0 2024-12-12T16:26:57,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741840_1016 (size=9657) 2024-12-12T16:26:57,320 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/8e746b81d1b040848e4459870142b3e0 2024-12-12T16:26:57,333 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:57,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35766 deadline: 1734020877331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:57,336 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:57,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35734 deadline: 1734020877334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:57,338 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:57,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020877334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:57,338 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:57,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35722 deadline: 1734020877333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:57,339 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:57,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35756 deadline: 1734020877335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:57,367 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/6a486ea3422547ca8d6c8260be212a9b is 50, key is test_row_0/C:col10/1734020816915/Put/seqid=0 2024-12-12T16:26:57,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741841_1017 (size=9657) 2024-12-12T16:26:57,388 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/6a486ea3422547ca8d6c8260be212a9b 2024-12-12T16:26:57,404 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/46a10f78d1664e318fc3d1dc777d4d3f as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/46a10f78d1664e318fc3d1dc777d4d3f 2024-12-12T16:26:57,423 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:57,424 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T16:26:57,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 
{event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:26:57,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. as already flushing 2024-12-12T16:26:57,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:26:57,426 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/46a10f78d1664e318fc3d1dc777d4d3f, entries=100, sequenceid=13, filesize=9.4 K 2024-12-12T16:26:57,425 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:26:57,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:26:57,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:26:57,430 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(1000): StoreScanner already closing. 
There is no need to updateReaders 2024-12-12T16:26:57,434 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/8e746b81d1b040848e4459870142b3e0 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/8e746b81d1b040848e4459870142b3e0 2024-12-12T16:26:57,451 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/8e746b81d1b040848e4459870142b3e0, entries=100, sequenceid=13, filesize=9.4 K 2024-12-12T16:26:57,454 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/6a486ea3422547ca8d6c8260be212a9b as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/6a486ea3422547ca8d6c8260be212a9b 2024-12-12T16:26:57,477 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/6a486ea3422547ca8d6c8260be212a9b, entries=100, sequenceid=13, filesize=9.4 K 2024-12-12T16:26:57,480 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for b22602467dd4e6c94f26649b7855f8e8 in 514ms, sequenceid=13, compaction requested=false 2024-12-12T16:26:57,481 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-12T16:26:57,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-12T16:26:57,483 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:26:57,551 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b22602467dd4e6c94f26649b7855f8e8 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-12-12T16:26:57,551 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=A 2024-12-12T16:26:57,551 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:26:57,551 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=B 2024-12-12T16:26:57,551 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:26:57,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=C 2024-12-12T16:26:57,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:26:57,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] 
regionserver.HRegion(8581): Flush requested on b22602467dd4e6c94f26649b7855f8e8 2024-12-12T16:26:57,573 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:57,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35722 deadline: 1734020877564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:57,581 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:57,582 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T16:26:57,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:26:57,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. as already flushing 2024-12-12T16:26:57,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 
2024-12-12T16:26:57,583 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/b82b136edc104698af5aa7b0d6aa6190 is 50, key is test_row_0/A:col10/1734020817183/Put/seqid=0 2024-12-12T16:26:57,583 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:26:57,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:26:57,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] 
at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:26:57,595 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:57,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35766 deadline: 1734020877569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:57,597 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:57,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020877572, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:57,599 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:57,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35734 deadline: 1734020877573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:57,604 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:57,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35756 deadline: 1734020877574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:57,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741842_1018 (size=12001) 2024-12-12T16:26:57,620 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/b82b136edc104698af5aa7b0d6aa6190 2024-12-12T16:26:57,652 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/67bcb6293db3458eaea3f54723250b22 is 50, key is test_row_0/B:col10/1734020817183/Put/seqid=0 2024-12-12T16:26:57,678 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:57,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35722 deadline: 1734020877677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:57,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741843_1019 (size=12001) 2024-12-12T16:26:57,701 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/67bcb6293db3458eaea3f54723250b22 2024-12-12T16:26:57,702 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:57,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35766 deadline: 1734020877699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:57,703 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:57,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020877702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:57,704 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:57,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35734 deadline: 1734020877703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:57,708 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:57,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35756 deadline: 1734020877708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:57,736 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/435c865d72694a919527bd35b258b733 is 50, key is test_row_0/C:col10/1734020817183/Put/seqid=0 2024-12-12T16:26:57,740 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:57,741 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T16:26:57,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:26:57,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. as already flushing 2024-12-12T16:26:57,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:26:57,741 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:26:57,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:26:57,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:26:57,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741844_1020 (size=12001) 2024-12-12T16:26:57,768 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/435c865d72694a919527bd35b258b733 2024-12-12T16:26:57,785 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/b82b136edc104698af5aa7b0d6aa6190 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/b82b136edc104698af5aa7b0d6aa6190 2024-12-12T16:26:57,800 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/b82b136edc104698af5aa7b0d6aa6190, entries=150, sequenceid=41, filesize=11.7 K 2024-12-12T16:26:57,803 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/67bcb6293db3458eaea3f54723250b22 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/67bcb6293db3458eaea3f54723250b22 2024-12-12T16:26:57,818 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/67bcb6293db3458eaea3f54723250b22, entries=150, sequenceid=41, filesize=11.7 K 2024-12-12T16:26:57,823 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/435c865d72694a919527bd35b258b733 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/435c865d72694a919527bd35b258b733 2024-12-12T16:26:57,838 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/435c865d72694a919527bd35b258b733, entries=150, sequenceid=41, filesize=11.7 K 2024-12-12T16:26:57,840 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=46.96 KB/48090 for b22602467dd4e6c94f26649b7855f8e8 in 289ms, sequenceid=41, compaction requested=false 2024-12-12T16:26:57,840 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:26:57,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on b22602467dd4e6c94f26649b7855f8e8 2024-12-12T16:26:57,888 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b22602467dd4e6c94f26649b7855f8e8 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T16:26:57,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=A 2024-12-12T16:26:57,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:26:57,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=B 2024-12-12T16:26:57,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:26:57,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=C 2024-12-12T16:26:57,890 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:26:57,895 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:57,896 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T16:26:57,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:26:57,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. as already flushing 2024-12-12T16:26:57,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:26:57,897 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:26:57,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:26:57,901 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/a19011640cdd421483226e850139bf0d is 50, key is test_row_0/A:col10/1734020817566/Put/seqid=0 2024-12-12T16:26:57,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:26:57,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741845_1021 (size=14341) 2024-12-12T16:26:57,933 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/a19011640cdd421483226e850139bf0d 2024-12-12T16:26:57,972 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/8e56483de8ad4bc782f349707117331a is 50, key is test_row_0/B:col10/1734020817566/Put/seqid=0 2024-12-12T16:26:57,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-12T16:26:57,991 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:57,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35722 deadline: 1734020877983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:58,010 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:58,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35734 deadline: 1734020877987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:58,012 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:58,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020877992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:58,013 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:58,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35756 deadline: 1734020877992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:58,014 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:58,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35766 deadline: 1734020877994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:58,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741846_1022 (size=12001) 2024-12-12T16:26:58,020 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/8e56483de8ad4bc782f349707117331a 2024-12-12T16:26:58,044 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/48cc60ae7c854f9fadbb095d4b7f32ee is 50, key is test_row_0/C:col10/1734020817566/Put/seqid=0 2024-12-12T16:26:58,047 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-12T16:26:58,049 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-12T16:26:58,054 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:58,055 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T16:26:58,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 
2024-12-12T16:26:58,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. as already flushing 2024-12-12T16:26:58,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:26:58,056 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:26:58,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:26:58,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:26:58,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741847_1023 (size=12001) 2024-12-12T16:26:58,066 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/48cc60ae7c854f9fadbb095d4b7f32ee 2024-12-12T16:26:58,082 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/a19011640cdd421483226e850139bf0d as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/a19011640cdd421483226e850139bf0d 2024-12-12T16:26:58,093 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/a19011640cdd421483226e850139bf0d, entries=200, sequenceid=52, filesize=14.0 K 2024-12-12T16:26:58,095 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/8e56483de8ad4bc782f349707117331a as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/8e56483de8ad4bc782f349707117331a 2024-12-12T16:26:58,105 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/8e56483de8ad4bc782f349707117331a, entries=150, sequenceid=52, filesize=11.7 K 2024-12-12T16:26:58,107 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/48cc60ae7c854f9fadbb095d4b7f32ee as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/48cc60ae7c854f9fadbb095d4b7f32ee 2024-12-12T16:26:58,108 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:58,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35722 deadline: 1734020878096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:58,115 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:58,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35734 deadline: 1734020878114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:58,119 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:58,120 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/48cc60ae7c854f9fadbb095d4b7f32ee, entries=150, sequenceid=52, filesize=11.7 K 2024-12-12T16:26:58,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35756 deadline: 1734020878117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:58,122 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for b22602467dd4e6c94f26649b7855f8e8 in 234ms, sequenceid=52, compaction requested=true 2024-12-12T16:26:58,122 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:26:58,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on b22602467dd4e6c94f26649b7855f8e8 2024-12-12T16:26:58,136 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:26:58,137 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b22602467dd4e6c94f26649b7855f8e8:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T16:26:58,137 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:26:58,137 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b22602467dd4e6c94f26649b7855f8e8:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T16:26:58,137 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:26:58,137 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b22602467dd4e6c94f26649b7855f8e8:C, priority=-2147483648, current under compaction store size is 3 
2024-12-12T16:26:58,137 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:26:58,137 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:26:58,138 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b22602467dd4e6c94f26649b7855f8e8 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-12T16:26:58,138 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=A 2024-12-12T16:26:58,139 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:26:58,139 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=B 2024-12-12T16:26:58,139 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:26:58,139 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=C 2024-12-12T16:26:58,139 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:26:58,143 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:26:58,145 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): b22602467dd4e6c94f26649b7855f8e8/B is initiating minor compaction (all files) 2024-12-12T16:26:58,145 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b22602467dd4e6c94f26649b7855f8e8/B in TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 
2024-12-12T16:26:58,146 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/8e746b81d1b040848e4459870142b3e0, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/67bcb6293db3458eaea3f54723250b22, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/8e56483de8ad4bc782f349707117331a] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp, totalSize=32.9 K 2024-12-12T16:26:58,147 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 8e746b81d1b040848e4459870142b3e0, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1734020816915 2024-12-12T16:26:58,152 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/bbb50af00050449c9a4df8e31e2faeff is 50, key is test_row_0/A:col10/1734020818124/Put/seqid=0 2024-12-12T16:26:58,153 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 67bcb6293db3458eaea3f54723250b22, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1734020817183 2024-12-12T16:26:58,154 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 8e56483de8ad4bc782f349707117331a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1734020817565 2024-12-12T16:26:58,155 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35999 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:26:58,155 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): b22602467dd4e6c94f26649b7855f8e8/A is initiating minor compaction (all files) 2024-12-12T16:26:58,155 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b22602467dd4e6c94f26649b7855f8e8/A in TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 
2024-12-12T16:26:58,155 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/46a10f78d1664e318fc3d1dc777d4d3f, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/b82b136edc104698af5aa7b0d6aa6190, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/a19011640cdd421483226e850139bf0d] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp, totalSize=35.2 K 2024-12-12T16:26:58,156 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 46a10f78d1664e318fc3d1dc777d4d3f, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1734020816915 2024-12-12T16:26:58,159 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting b82b136edc104698af5aa7b0d6aa6190, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1734020817183 2024-12-12T16:26:58,160 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting a19011640cdd421483226e850139bf0d, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1734020817564 2024-12-12T16:26:58,167 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:58,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35766 deadline: 1734020878165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:58,168 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:58,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020878165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:58,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741848_1024 (size=16681) 2024-12-12T16:26:58,176 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/bbb50af00050449c9a4df8e31e2faeff 2024-12-12T16:26:58,203 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b22602467dd4e6c94f26649b7855f8e8#B#compaction#10 average throughput is 0.66 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:26:58,204 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/5ee6c18f9a1f4386ba4a5f825c4c0601 is 50, key is test_row_0/B:col10/1734020817566/Put/seqid=0 2024-12-12T16:26:58,211 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:58,211 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T16:26:58,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:26:58,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. as already flushing 2024-12-12T16:26:58,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 
2024-12-12T16:26:58,212 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:26:58,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:26:58,213 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b22602467dd4e6c94f26649b7855f8e8#A#compaction#11 average throughput is 0.82 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:26:58,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:26:58,214 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/e10aee92b83b4b52b71eca3b65424d7d is 50, key is test_row_0/A:col10/1734020817566/Put/seqid=0 2024-12-12T16:26:58,221 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/d7001e1b241c4e6d9f0f8e1be0cbc0d5 is 50, key is test_row_0/B:col10/1734020818124/Put/seqid=0 2024-12-12T16:26:58,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741849_1025 (size=12104) 2024-12-12T16:26:58,253 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/5ee6c18f9a1f4386ba4a5f825c4c0601 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/5ee6c18f9a1f4386ba4a5f825c4c0601 2024-12-12T16:26:58,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741850_1026 (size=12104) 2024-12-12T16:26:58,273 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:58,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35766 deadline: 1734020878272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:58,274 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:58,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020878272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:58,277 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b22602467dd4e6c94f26649b7855f8e8/B of b22602467dd4e6c94f26649b7855f8e8 into 5ee6c18f9a1f4386ba4a5f825c4c0601(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:26:58,278 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:26:58,278 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8., storeName=b22602467dd4e6c94f26649b7855f8e8/B, priority=13, startTime=1734020818137; duration=0sec 2024-12-12T16:26:58,278 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:26:58,278 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b22602467dd4e6c94f26649b7855f8e8:B 2024-12-12T16:26:58,279 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:26:58,281 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:26:58,281 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): b22602467dd4e6c94f26649b7855f8e8/C is initiating minor compaction (all files) 2024-12-12T16:26:58,281 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b22602467dd4e6c94f26649b7855f8e8/C in TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 
2024-12-12T16:26:58,282 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/6a486ea3422547ca8d6c8260be212a9b, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/435c865d72694a919527bd35b258b733, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/48cc60ae7c854f9fadbb095d4b7f32ee] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp, totalSize=32.9 K 2024-12-12T16:26:58,282 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 6a486ea3422547ca8d6c8260be212a9b, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1734020816915 2024-12-12T16:26:58,283 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 435c865d72694a919527bd35b258b733, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1734020817183 2024-12-12T16:26:58,284 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 48cc60ae7c854f9fadbb095d4b7f32ee, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1734020817565 2024-12-12T16:26:58,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741851_1027 (size=12001) 2024-12-12T16:26:58,316 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:58,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35722 deadline: 1734020878313, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:58,320 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:58,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35734 deadline: 1734020878319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:58,327 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b22602467dd4e6c94f26649b7855f8e8#C#compaction#13 average throughput is 1.09 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:26:58,328 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/1981242d39b148a991f60ce3cfce17d5 is 50, key is test_row_0/C:col10/1734020817566/Put/seqid=0 2024-12-12T16:26:58,336 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:58,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35756 deadline: 1734020878325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:58,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741852_1028 (size=12104) 2024-12-12T16:26:58,366 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:58,366 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T16:26:58,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:26:58,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 
as already flushing 2024-12-12T16:26:58,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:26:58,367 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:26:58,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:26:58,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:26:58,378 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/1981242d39b148a991f60ce3cfce17d5 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/1981242d39b148a991f60ce3cfce17d5 2024-12-12T16:26:58,391 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b22602467dd4e6c94f26649b7855f8e8/C of b22602467dd4e6c94f26649b7855f8e8 into 1981242d39b148a991f60ce3cfce17d5(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T16:26:58,391 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:26:58,391 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8., storeName=b22602467dd4e6c94f26649b7855f8e8/C, priority=13, startTime=1734020818137; duration=0sec 2024-12-12T16:26:58,391 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:26:58,391 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b22602467dd4e6c94f26649b7855f8e8:C 2024-12-12T16:26:58,479 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:58,479 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:58,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020878478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:58,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35766 deadline: 1734020878478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:58,520 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:58,521 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T16:26:58,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:26:58,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. as already flushing 2024-12-12T16:26:58,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:26:58,522 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:26:58,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:26:58,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:26:58,625 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:58,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35722 deadline: 1734020878623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:58,628 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:58,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35734 deadline: 1734020878625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:58,641 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:58,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35756 deadline: 1734020878641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:58,675 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:58,676 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T16:26:58,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 
2024-12-12T16:26:58,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. as already flushing 2024-12-12T16:26:58,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:26:58,677 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:26:58,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:26:58,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:26:58,688 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/e10aee92b83b4b52b71eca3b65424d7d as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/e10aee92b83b4b52b71eca3b65424d7d 2024-12-12T16:26:58,696 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/d7001e1b241c4e6d9f0f8e1be0cbc0d5 2024-12-12T16:26:58,717 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b22602467dd4e6c94f26649b7855f8e8/A of b22602467dd4e6c94f26649b7855f8e8 into e10aee92b83b4b52b71eca3b65424d7d(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:26:58,717 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:26:58,718 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8., storeName=b22602467dd4e6c94f26649b7855f8e8/A, priority=13, startTime=1734020818124; duration=0sec 2024-12-12T16:26:58,720 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:26:58,720 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b22602467dd4e6c94f26649b7855f8e8:A 2024-12-12T16:26:58,733 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/17ba32f110e94ca8b69014f889d1099a is 50, key is test_row_0/C:col10/1734020818124/Put/seqid=0 2024-12-12T16:26:58,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741853_1029 (size=12001) 2024-12-12T16:26:58,755 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/17ba32f110e94ca8b69014f889d1099a 2024-12-12T16:26:58,767 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/bbb50af00050449c9a4df8e31e2faeff as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/bbb50af00050449c9a4df8e31e2faeff 2024-12-12T16:26:58,779 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/bbb50af00050449c9a4df8e31e2faeff, entries=250, sequenceid=79, filesize=16.3 K 2024-12-12T16:26:58,784 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:58,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020878783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:58,785 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:58,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35766 deadline: 1734020878783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:58,797 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/d7001e1b241c4e6d9f0f8e1be0cbc0d5 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/d7001e1b241c4e6d9f0f8e1be0cbc0d5 2024-12-12T16:26:58,813 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/d7001e1b241c4e6d9f0f8e1be0cbc0d5, entries=150, sequenceid=79, filesize=11.7 K 2024-12-12T16:26:58,818 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-12T16:26:58,819 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/17ba32f110e94ca8b69014f889d1099a as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/17ba32f110e94ca8b69014f889d1099a 2024-12-12T16:26:58,831 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:58,832 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T16:26:58,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:26:58,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. as already flushing 2024-12-12T16:26:58,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 
2024-12-12T16:26:58,833 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:26:58,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:26:58,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:26:58,860 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/17ba32f110e94ca8b69014f889d1099a, entries=150, sequenceid=79, filesize=11.7 K 2024-12-12T16:26:58,865 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for b22602467dd4e6c94f26649b7855f8e8 in 727ms, sequenceid=79, compaction requested=false 2024-12-12T16:26:58,865 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:26:58,986 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:58,987 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T16:26:58,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-12T16:26:58,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 
2024-12-12T16:26:58,987 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing b22602467dd4e6c94f26649b7855f8e8 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-12-12T16:26:58,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=A 2024-12-12T16:26:58,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:26:58,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=B 2024-12-12T16:26:58,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:26:58,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=C 2024-12-12T16:26:58,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:26:59,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/57679d0e21ac4b0e931b2e23f5561639 is 50, key is test_row_0/A:col10/1734020818161/Put/seqid=0 2024-12-12T16:26:59,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741854_1030 (size=12001) 2024-12-12T16:26:59,042 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/57679d0e21ac4b0e931b2e23f5561639 2024-12-12T16:26:59,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/f926d053f316444a981b217bf788bb97 is 50, key is test_row_0/B:col10/1734020818161/Put/seqid=0 2024-12-12T16:26:59,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741855_1031 (size=12001) 2024-12-12T16:26:59,130 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=91 (bloomFilter=true), 
to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/f926d053f316444a981b217bf788bb97 2024-12-12T16:26:59,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on b22602467dd4e6c94f26649b7855f8e8 2024-12-12T16:26:59,137 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. as already flushing 2024-12-12T16:26:59,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/223a365727714d4295db6e6d53bb584f is 50, key is test_row_0/C:col10/1734020818161/Put/seqid=0 2024-12-12T16:26:59,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741856_1032 (size=12001) 2024-12-12T16:26:59,177 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/223a365727714d4295db6e6d53bb584f 2024-12-12T16:26:59,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/57679d0e21ac4b0e931b2e23f5561639 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/57679d0e21ac4b0e931b2e23f5561639 2024-12-12T16:26:59,207 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/57679d0e21ac4b0e931b2e23f5561639, entries=150, sequenceid=91, filesize=11.7 K 2024-12-12T16:26:59,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/f926d053f316444a981b217bf788bb97 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/f926d053f316444a981b217bf788bb97 2024-12-12T16:26:59,216 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-12T16:26:59,217 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-12T16:26:59,220 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): 
Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-12-12T16:26:59,220 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-12-12T16:26:59,223 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-12T16:26:59,223 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-12T16:26:59,224 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-12T16:26:59,224 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-12T16:26:59,226 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-12T16:26:59,226 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-12-12T16:26:59,229 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/f926d053f316444a981b217bf788bb97, entries=150, sequenceid=91, filesize=11.7 K 2024-12-12T16:26:59,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/223a365727714d4295db6e6d53bb584f as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/223a365727714d4295db6e6d53bb584f 2024-12-12T16:26:59,241 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:59,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35722 deadline: 1734020879235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:59,245 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:59,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35756 deadline: 1734020879239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:59,246 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/223a365727714d4295db6e6d53bb584f, entries=150, sequenceid=91, filesize=11.7 K 2024-12-12T16:26:59,248 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=167.72 KB/171750 for b22602467dd4e6c94f26649b7855f8e8 in 261ms, sequenceid=91, compaction requested=true 2024-12-12T16:26:59,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:26:59,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 
2024-12-12T16:26:59,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-12-12T16:26:59,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4106): Remote procedure done, pid=13 2024-12-12T16:26:59,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on b22602467dd4e6c94f26649b7855f8e8 2024-12-12T16:26:59,256 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b22602467dd4e6c94f26649b7855f8e8 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-12-12T16:26:59,256 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=A 2024-12-12T16:26:59,256 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:26:59,256 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=B 2024-12-12T16:26:59,256 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:26:59,257 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=C 2024-12-12T16:26:59,257 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:26:59,262 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-12-12T16:26:59,262 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3760 sec 2024-12-12T16:26:59,268 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees in 2.4020 sec 2024-12-12T16:26:59,287 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/7a6ace4f69964854a7d8fff97095ae9a is 50, key is test_row_0/A:col10/1734020819253/Put/seqid=0 2024-12-12T16:26:59,301 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:59,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35766 deadline: 1734020879292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:59,305 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:59,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020879302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:59,306 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:59,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35734 deadline: 1734020879302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:59,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741857_1033 (size=12001) 2024-12-12T16:26:59,333 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=120 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/7a6ace4f69964854a7d8fff97095ae9a 2024-12-12T16:26:59,347 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:59,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35722 deadline: 1734020879345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:59,348 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:59,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35756 deadline: 1734020879347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:59,371 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/9ff6664c67854f3eb5b42081bf76263f is 50, key is test_row_0/B:col10/1734020819253/Put/seqid=0 2024-12-12T16:26:59,409 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:59,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020879408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:59,410 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:59,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35734 deadline: 1734020879409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:59,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741858_1034 (size=12001) 2024-12-12T16:26:59,413 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=120 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/9ff6664c67854f3eb5b42081bf76263f 2024-12-12T16:26:59,439 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/88daa93c782c4000b40f7f01d9921bae is 50, key is test_row_0/C:col10/1734020819253/Put/seqid=0 2024-12-12T16:26:59,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741859_1035 (size=12001) 2024-12-12T16:26:59,472 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=120 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/88daa93c782c4000b40f7f01d9921bae 2024-12-12T16:26:59,484 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/7a6ace4f69964854a7d8fff97095ae9a as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/7a6ace4f69964854a7d8fff97095ae9a 2024-12-12T16:26:59,495 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/7a6ace4f69964854a7d8fff97095ae9a, entries=150, sequenceid=120, filesize=11.7 K 2024-12-12T16:26:59,497 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/9ff6664c67854f3eb5b42081bf76263f as 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/9ff6664c67854f3eb5b42081bf76263f 2024-12-12T16:26:59,507 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/9ff6664c67854f3eb5b42081bf76263f, entries=150, sequenceid=120, filesize=11.7 K 2024-12-12T16:26:59,508 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/88daa93c782c4000b40f7f01d9921bae as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/88daa93c782c4000b40f7f01d9921bae 2024-12-12T16:26:59,520 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/88daa93c782c4000b40f7f01d9921bae, entries=150, sequenceid=120, filesize=11.7 K 2024-12-12T16:26:59,522 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=33.54 KB/34350 for b22602467dd4e6c94f26649b7855f8e8 in 267ms, sequenceid=120, compaction requested=true 2024-12-12T16:26:59,522 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:26:59,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b22602467dd4e6c94f26649b7855f8e8:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T16:26:59,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:26:59,523 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T16:26:59,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b22602467dd4e6c94f26649b7855f8e8:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T16:26:59,523 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T16:26:59,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:26:59,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b22602467dd4e6c94f26649b7855f8e8:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T16:26:59,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:26:59,526 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction 
algorithm has selected 4 files of size 52787 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T16:26:59,526 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): b22602467dd4e6c94f26649b7855f8e8/A is initiating minor compaction (all files) 2024-12-12T16:26:59,526 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b22602467dd4e6c94f26649b7855f8e8/A in TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:26:59,526 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T16:26:59,526 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): b22602467dd4e6c94f26649b7855f8e8/B is initiating minor compaction (all files) 2024-12-12T16:26:59,526 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/e10aee92b83b4b52b71eca3b65424d7d, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/bbb50af00050449c9a4df8e31e2faeff, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/57679d0e21ac4b0e931b2e23f5561639, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/7a6ace4f69964854a7d8fff97095ae9a] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp, totalSize=51.5 K 2024-12-12T16:26:59,526 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b22602467dd4e6c94f26649b7855f8e8/B in TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 
2024-12-12T16:26:59,527 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/5ee6c18f9a1f4386ba4a5f825c4c0601, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/d7001e1b241c4e6d9f0f8e1be0cbc0d5, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/f926d053f316444a981b217bf788bb97, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/9ff6664c67854f3eb5b42081bf76263f] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp, totalSize=47.0 K 2024-12-12T16:26:59,527 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting e10aee92b83b4b52b71eca3b65424d7d, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1734020817565 2024-12-12T16:26:59,527 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 5ee6c18f9a1f4386ba4a5f825c4c0601, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1734020817565 2024-12-12T16:26:59,528 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting bbb50af00050449c9a4df8e31e2faeff, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1734020817982 2024-12-12T16:26:59,528 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting d7001e1b241c4e6d9f0f8e1be0cbc0d5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1734020817982 2024-12-12T16:26:59,529 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 57679d0e21ac4b0e931b2e23f5561639, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1734020818142 2024-12-12T16:26:59,529 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting f926d053f316444a981b217bf788bb97, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1734020818142 2024-12-12T16:26:59,530 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7a6ace4f69964854a7d8fff97095ae9a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1734020819218 2024-12-12T16:26:59,530 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 9ff6664c67854f3eb5b42081bf76263f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1734020819218 2024-12-12T16:26:59,570 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b22602467dd4e6c94f26649b7855f8e8 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T16:26:59,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on b22602467dd4e6c94f26649b7855f8e8 2024-12-12T16:26:59,571 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=A 2024-12-12T16:26:59,571 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:26:59,571 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=B 2024-12-12T16:26:59,571 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:26:59,572 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=C 2024-12-12T16:26:59,572 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:26:59,572 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b22602467dd4e6c94f26649b7855f8e8#B#compaction#21 average throughput is 0.73 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:26:59,572 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b22602467dd4e6c94f26649b7855f8e8#A#compaction#22 average throughput is 0.82 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:26:59,573 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/06e56604a74942febc1a3fc9be306cc1 is 50, key is test_row_0/B:col10/1734020819253/Put/seqid=0 2024-12-12T16:26:59,573 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/993dddae5f9e482e8c1f8d0fbb27f98e is 50, key is test_row_0/A:col10/1734020819253/Put/seqid=0 2024-12-12T16:26:59,584 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/2ff4386772c841f68fd142432650dc2b is 50, key is test_row_0/A:col10/1734020819567/Put/seqid=0 2024-12-12T16:26:59,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741860_1036 (size=12241) 2024-12-12T16:26:59,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741861_1037 (size=12241) 2024-12-12T16:26:59,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741862_1038 (size=16831) 2024-12-12T16:26:59,621 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/2ff4386772c841f68fd142432650dc2b 2024-12-12T16:26:59,651 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/c4058445651f4343bd8465ba3e2fe75a is 50, key is test_row_0/B:col10/1734020819567/Put/seqid=0 2024-12-12T16:26:59,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741863_1039 (size=12101) 2024-12-12T16:26:59,712 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:59,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020879694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:59,713 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:59,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35722 deadline: 1734020879701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:59,714 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:59,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35756 deadline: 1734020879704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:59,723 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:59,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35734 deadline: 1734020879707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:59,819 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:59,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35722 deadline: 1734020879816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:59,820 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:59,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35756 deadline: 1734020879816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:59,821 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:59,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020879817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:26:59,827 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:26:59,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35734 deadline: 1734020879825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:00,025 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/06e56604a74942febc1a3fc9be306cc1 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/06e56604a74942febc1a3fc9be306cc1 2024-12-12T16:27:00,027 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/993dddae5f9e482e8c1f8d0fbb27f98e as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/993dddae5f9e482e8c1f8d0fbb27f98e 2024-12-12T16:27:00,031 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:00,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020880023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:00,032 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:00,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35722 deadline: 1734020880024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:00,033 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:00,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35756 deadline: 1734020880024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:00,034 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:00,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35734 deadline: 1734020880032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:00,040 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b22602467dd4e6c94f26649b7855f8e8/B of b22602467dd4e6c94f26649b7855f8e8 into 06e56604a74942febc1a3fc9be306cc1(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:27:00,040 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b22602467dd4e6c94f26649b7855f8e8/A of b22602467dd4e6c94f26649b7855f8e8 into 993dddae5f9e482e8c1f8d0fbb27f98e(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:27:00,040 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:27:00,040 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:27:00,040 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8., storeName=b22602467dd4e6c94f26649b7855f8e8/B, priority=12, startTime=1734020819523; duration=0sec 2024-12-12T16:27:00,041 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8., storeName=b22602467dd4e6c94f26649b7855f8e8/A, priority=12, startTime=1734020819523; duration=0sec 2024-12-12T16:27:00,041 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:27:00,041 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b22602467dd4e6c94f26649b7855f8e8:A 2024-12-12T16:27:00,041 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:27:00,042 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b22602467dd4e6c94f26649b7855f8e8:B 2024-12-12T16:27:00,042 DEBUG 
[RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T16:27:00,046 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T16:27:00,046 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): b22602467dd4e6c94f26649b7855f8e8/C is initiating minor compaction (all files) 2024-12-12T16:27:00,046 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b22602467dd4e6c94f26649b7855f8e8/C in TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:27:00,046 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/1981242d39b148a991f60ce3cfce17d5, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/17ba32f110e94ca8b69014f889d1099a, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/223a365727714d4295db6e6d53bb584f, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/88daa93c782c4000b40f7f01d9921bae] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp, totalSize=47.0 K 2024-12-12T16:27:00,047 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1981242d39b148a991f60ce3cfce17d5, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1734020817565 2024-12-12T16:27:00,048 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 17ba32f110e94ca8b69014f889d1099a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1734020817982 2024-12-12T16:27:00,049 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 223a365727714d4295db6e6d53bb584f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1734020818142 2024-12-12T16:27:00,049 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 88daa93c782c4000b40f7f01d9921bae, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1734020819218 2024-12-12T16:27:00,076 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b22602467dd4e6c94f26649b7855f8e8#C#compaction#25 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:27:00,077 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/d597f6ce6bd644dfaf71cefaab7d118c is 50, key is test_row_0/C:col10/1734020819253/Put/seqid=0 2024-12-12T16:27:00,087 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/c4058445651f4343bd8465ba3e2fe75a 2024-12-12T16:27:00,110 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/8093f08c9e374f2499dae4f16bf15166 is 50, key is test_row_0/C:col10/1734020819567/Put/seqid=0 2024-12-12T16:27:00,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741864_1040 (size=12241) 2024-12-12T16:27:00,142 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/d597f6ce6bd644dfaf71cefaab7d118c as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/d597f6ce6bd644dfaf71cefaab7d118c 2024-12-12T16:27:00,154 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b22602467dd4e6c94f26649b7855f8e8/C of b22602467dd4e6c94f26649b7855f8e8 into d597f6ce6bd644dfaf71cefaab7d118c(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:27:00,154 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:27:00,154 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8., storeName=b22602467dd4e6c94f26649b7855f8e8/C, priority=12, startTime=1734020819523; duration=0sec 2024-12-12T16:27:00,154 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:00,155 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b22602467dd4e6c94f26649b7855f8e8:C 2024-12-12T16:27:00,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741865_1041 (size=12101) 2024-12-12T16:27:00,316 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:00,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35766 deadline: 1734020880315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:00,338 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:00,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35734 deadline: 1734020880337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:00,339 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:00,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35756 deadline: 1734020880338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:00,347 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:00,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020880345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:00,348 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:00,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35722 deadline: 1734020880345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:00,561 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/8093f08c9e374f2499dae4f16bf15166 2024-12-12T16:27:00,587 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/2ff4386772c841f68fd142432650dc2b as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/2ff4386772c841f68fd142432650dc2b 2024-12-12T16:27:00,598 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/2ff4386772c841f68fd142432650dc2b, entries=250, sequenceid=131, filesize=16.4 K 2024-12-12T16:27:00,601 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/c4058445651f4343bd8465ba3e2fe75a as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/c4058445651f4343bd8465ba3e2fe75a 2024-12-12T16:27:00,611 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/c4058445651f4343bd8465ba3e2fe75a, entries=150, sequenceid=131, filesize=11.8 K 2024-12-12T16:27:00,613 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/8093f08c9e374f2499dae4f16bf15166 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/8093f08c9e374f2499dae4f16bf15166 2024-12-12T16:27:00,622 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/8093f08c9e374f2499dae4f16bf15166, entries=150, sequenceid=131, filesize=11.8 K 2024-12-12T16:27:00,624 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for b22602467dd4e6c94f26649b7855f8e8 in 1053ms, sequenceid=131, compaction requested=false 2024-12-12T16:27:00,624 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:27:00,846 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b22602467dd4e6c94f26649b7855f8e8 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-12T16:27:00,846 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=A 2024-12-12T16:27:00,846 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:00,846 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=B 2024-12-12T16:27:00,846 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:00,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=C 2024-12-12T16:27:00,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:00,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on b22602467dd4e6c94f26649b7855f8e8 2024-12-12T16:27:00,859 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/fd5bee516fa9427da605f9f06469e6ba is 50, key is test_row_0/A:col10/1734020820841/Put/seqid=0 2024-12-12T16:27:00,866 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:00,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35734 deadline: 1734020880861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:00,867 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:00,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35756 deadline: 1734020880863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:00,869 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:00,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020880866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:00,869 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:00,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35722 deadline: 1734020880867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:00,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741866_1042 (size=14541) 2024-12-12T16:27:00,970 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:00,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35734 deadline: 1734020880968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:00,973 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:00,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35756 deadline: 1734020880970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:00,973 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:00,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35722 deadline: 1734020880971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:00,974 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:00,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020880972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:00,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-12T16:27:00,988 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 12 completed 2024-12-12T16:27:00,991 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T16:27:00,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees 2024-12-12T16:27:00,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-12T16:27:00,998 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T16:27:00,999 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T16:27:00,999 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T16:27:01,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure 
is done pid=14 2024-12-12T16:27:01,154 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:01,155 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-12T16:27:01,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:27:01,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. as already flushing 2024-12-12T16:27:01,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:27:01,156 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:01,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
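The entries above show client Mutate calls being rejected with RegionTooBusyException because the region's memstore is over its 512.0 K blocking limit, while the master's flush procedure (pid=15) bounces because the region is already flushing. Below is a minimal client-side sketch of how a writer might back off and retry such rejected puts; the table name, row, column family, retry count, and backoff values are illustrative, loosely taken from the keys visible in this log rather than from the test's actual source, and depending on client retry settings the exception may arrive wrapped rather than directly.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row and column mirror the "test_row_0/A:col10" keys seen in the flush output above.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      putWithBackoff(table, put, 5, 250L);
    }
  }

  // Retry a put that was rejected because the region's memstore is over its blocking
  // limit. The RegionTooBusyException may arrive wrapped in another IOException once
  // the client's own retries are exhausted, so the cause chain is inspected.
  static void putWithBackoff(Table table, Put put, int maxAttempts, long backoffMs)
      throws IOException, InterruptedException {
    for (int attempt = 1; ; attempt++) {
      try {
        table.put(put);
        return;
      } catch (IOException e) {
        if (!causedByBusyRegion(e) || attempt >= maxAttempts) {
          throw e; // not a busy-region rejection, or out of attempts
        }
        Thread.sleep(backoffMs * attempt); // simple linear backoff before retrying
      }
    }
  }

  private static boolean causedByBusyRegion(Throwable t) {
    for (Throwable cur = t; cur != null; cur = cur.getCause()) {
      if (cur instanceof RegionTooBusyException) {
        return true;
      }
    }
    return false;
  }
}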
2024-12-12T16:27:01,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:01,181 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:01,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35756 deadline: 1734020881176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:01,182 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:01,182 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:01,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35734 deadline: 1734020881178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:01,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35722 deadline: 1734020881177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:01,183 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:01,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020881179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:01,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-12T16:27:01,304 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=160 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/fd5bee516fa9427da605f9f06469e6ba 2024-12-12T16:27:01,313 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:01,314 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-12T16:27:01,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:27:01,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. as already flushing 2024-12-12T16:27:01,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:27:01,315 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:01,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:01,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:01,326 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/1755ee0659d74535989af2a323b8040e is 50, key is test_row_0/B:col10/1734020820841/Put/seqid=0 2024-12-12T16:27:01,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741867_1043 (size=12151) 2024-12-12T16:27:01,359 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=160 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/1755ee0659d74535989af2a323b8040e 2024-12-12T16:27:01,382 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/7fe9d4d6a93248f1a38b7fde90c32183 is 50, key is test_row_0/C:col10/1734020820841/Put/seqid=0 2024-12-12T16:27:01,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741868_1044 (size=12151) 2024-12-12T16:27:01,403 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=160 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/7fe9d4d6a93248f1a38b7fde90c32183 2024-12-12T16:27:01,415 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/fd5bee516fa9427da605f9f06469e6ba as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/fd5bee516fa9427da605f9f06469e6ba 2024-12-12T16:27:01,428 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/fd5bee516fa9427da605f9f06469e6ba, entries=200, sequenceid=160, filesize=14.2 K 2024-12-12T16:27:01,431 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/1755ee0659d74535989af2a323b8040e as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/1755ee0659d74535989af2a323b8040e 2024-12-12T16:27:01,448 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/1755ee0659d74535989af2a323b8040e, entries=150, sequenceid=160, filesize=11.9 K 2024-12-12T16:27:01,451 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/7fe9d4d6a93248f1a38b7fde90c32183 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/7fe9d4d6a93248f1a38b7fde90c32183 2024-12-12T16:27:01,467 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/7fe9d4d6a93248f1a38b7fde90c32183, entries=150, sequenceid=160, filesize=11.9 K 2024-12-12T16:27:01,468 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:01,469 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for b22602467dd4e6c94f26649b7855f8e8 in 623ms, sequenceid=160, compaction requested=true 2024-12-12T16:27:01,469 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:27:01,469 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b22602467dd4e6c94f26649b7855f8e8:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T16:27:01,469 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:27:01,469 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:01,469 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-12T16:27:01,469 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:27:01,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 
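The "Over memstore limit=512.0 K" rejections come from HRegion.checkResources, which blocks updates once a region's memstore grows past its blocking size; that size is derived from the per-region flush size and a block multiplier, so the unusually small 512 KB limit here points at a test configuration with a deliberately tiny flush size. The sketch below shows the relevant settings; the concrete numbers are only an illustration that would reproduce a 512 KB blocking limit (128 KB flush size with a multiplier of 4), not the values this test actually uses.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Flush a region's memstore once it reaches this many bytes (128 KB here, far
    // below the usual default, as a write-pressure test like this one would set).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);

    // Updates are blocked with RegionTooBusyException once the memstore exceeds
    // flush.size * block.multiplier; 128 KB * 4 = 512 KB, matching the limit in the log.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 0L);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
    System.out.println("Blocking memstore size: " + (flushSize * multiplier) + " bytes");
  }
}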
2024-12-12T16:27:01,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b22602467dd4e6c94f26649b7855f8e8:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T16:27:01,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:01,470 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2837): Flushing b22602467dd4e6c94f26649b7855f8e8 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T16:27:01,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b22602467dd4e6c94f26649b7855f8e8:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T16:27:01,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:27:01,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=A 2024-12-12T16:27:01,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:01,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=B 2024-12-12T16:27:01,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:01,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=C 2024-12-12T16:27:01,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:01,473 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 43613 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:27:01,473 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): b22602467dd4e6c94f26649b7855f8e8/A is initiating minor compaction (all files) 2024-12-12T16:27:01,474 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b22602467dd4e6c94f26649b7855f8e8/A in TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 
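Right after the flush completes, MemStoreFlusher marks all three stores (A, B, C) for compaction and the ExploringCompactionPolicy selects all three store files per store for a minor compaction. The compactions in this log are requested automatically by the flusher, but the same work can be asked for explicitly from client code; a hedged sketch follows, with the table name taken from the log and the compaction.min value shown being simply the common default of 3 that matches the "3 store files ... 3 eligible" selection above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class CompactionRequestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Minor compactions become eligible once a store has at least this many files;
    // 3 is the usual default and matches the selection reported in the log.
    conf.setInt("hbase.hstore.compaction.min", 3);

    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask for a compaction of one column family, then a major compaction of the
      // whole table; both requests are executed asynchronously on the region server.
      admin.compact(table, Bytes.toBytes("A"));
      admin.majorCompact(table);
    }
  }
}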
2024-12-12T16:27:01,474 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/993dddae5f9e482e8c1f8d0fbb27f98e, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/2ff4386772c841f68fd142432650dc2b, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/fd5bee516fa9427da605f9f06469e6ba] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp, totalSize=42.6 K 2024-12-12T16:27:01,474 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36493 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:27:01,474 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): b22602467dd4e6c94f26649b7855f8e8/B is initiating minor compaction (all files) 2024-12-12T16:27:01,474 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b22602467dd4e6c94f26649b7855f8e8/B in TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:27:01,475 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/06e56604a74942febc1a3fc9be306cc1, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/c4058445651f4343bd8465ba3e2fe75a, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/1755ee0659d74535989af2a323b8040e] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp, totalSize=35.6 K 2024-12-12T16:27:01,476 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 993dddae5f9e482e8c1f8d0fbb27f98e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1734020819218 2024-12-12T16:27:01,477 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 06e56604a74942febc1a3fc9be306cc1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1734020819218 2024-12-12T16:27:01,477 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2ff4386772c841f68fd142432650dc2b, keycount=250, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1734020819274 2024-12-12T16:27:01,478 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting c4058445651f4343bd8465ba3e2fe75a, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1734020819298 2024-12-12T16:27:01,479 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 1755ee0659d74535989af2a323b8040e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1734020819691 2024-12-12T16:27:01,480 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting fd5bee516fa9427da605f9f06469e6ba, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1734020819691 2024-12-12T16:27:01,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/cd920d4432ad4f23b101bdb2b2ea6d46 is 50, key is test_row_0/A:col10/1734020820864/Put/seqid=0 2024-12-12T16:27:01,494 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. as already flushing 2024-12-12T16:27:01,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on b22602467dd4e6c94f26649b7855f8e8 2024-12-12T16:27:01,502 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b22602467dd4e6c94f26649b7855f8e8#A#compaction#31 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:27:01,503 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/c5f0835582924b2e8ffabb3fadb57481 is 50, key is test_row_0/A:col10/1734020820841/Put/seqid=0 2024-12-12T16:27:01,507 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b22602467dd4e6c94f26649b7855f8e8#B#compaction#32 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:27:01,508 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/9cb383709d3745daa6e16060ff3d70db is 50, key is test_row_0/B:col10/1734020820841/Put/seqid=0 2024-12-12T16:27:01,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741869_1045 (size=12151) 2024-12-12T16:27:01,553 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:01,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35734 deadline: 1734020881542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:01,555 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:01,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35756 deadline: 1734020881545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:01,561 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:01,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35722 deadline: 1734020881557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:01,563 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:01,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020881558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:01,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741870_1046 (size=12493) 2024-12-12T16:27:01,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741871_1047 (size=12493) 2024-12-12T16:27:01,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-12T16:27:01,616 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/9cb383709d3745daa6e16060ff3d70db as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/9cb383709d3745daa6e16060ff3d70db 2024-12-12T16:27:01,626 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b22602467dd4e6c94f26649b7855f8e8/B of b22602467dd4e6c94f26649b7855f8e8 into 9cb383709d3745daa6e16060ff3d70db(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
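The records just above show the server-side minor compaction of store B finishing: the rewritten file is committed from the region's .tmp directory into B and replaces the three input files it was built from. These compactions are scheduled by the region server itself, but the same work can be requested and watched from a client through the public Admin API. The following is only an illustrative sketch against the standard HBase 2.x client (the table name is taken from the log; the code is not part of the test run, and the compaction request is asynchronous, so the poll is best-effort):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactionProbe {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees"); // table name from the log
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.compact(table); // asynchronous minor-compaction request
      // Best-effort wait: poll until the servers report no compaction running for this table.
      while (admin.getCompactionState(table) != CompactionState.NONE) {
        Thread.sleep(1000);
      }
    }
  }
}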
2024-12-12T16:27:01,626 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:27:01,626 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8., storeName=b22602467dd4e6c94f26649b7855f8e8/B, priority=13, startTime=1734020821469; duration=0sec 2024-12-12T16:27:01,627 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:27:01,627 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b22602467dd4e6c94f26649b7855f8e8:B 2024-12-12T16:27:01,627 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:27:01,629 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36493 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:27:01,629 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): b22602467dd4e6c94f26649b7855f8e8/C is initiating minor compaction (all files) 2024-12-12T16:27:01,630 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b22602467dd4e6c94f26649b7855f8e8/C in TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:27:01,630 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/d597f6ce6bd644dfaf71cefaab7d118c, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/8093f08c9e374f2499dae4f16bf15166, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/7fe9d4d6a93248f1a38b7fde90c32183] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp, totalSize=35.6 K 2024-12-12T16:27:01,631 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting d597f6ce6bd644dfaf71cefaab7d118c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1734020819218 2024-12-12T16:27:01,632 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 8093f08c9e374f2499dae4f16bf15166, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1734020819298 2024-12-12T16:27:01,633 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 7fe9d4d6a93248f1a38b7fde90c32183, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1734020819691 2024-12-12T16:27:01,658 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
b22602467dd4e6c94f26649b7855f8e8#C#compaction#33 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:27:01,659 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/f88203b02c8041a9ba6668013b667da1 is 50, key is test_row_0/C:col10/1734020820841/Put/seqid=0 2024-12-12T16:27:01,660 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:01,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35756 deadline: 1734020881657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:01,661 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:01,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35734 deadline: 1734020881657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:01,667 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:01,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35722 deadline: 1734020881664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:01,668 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:01,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020881666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:01,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741872_1048 (size=12493) 2024-12-12T16:27:01,709 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/f88203b02c8041a9ba6668013b667da1 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/f88203b02c8041a9ba6668013b667da1 2024-12-12T16:27:01,723 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b22602467dd4e6c94f26649b7855f8e8/C of b22602467dd4e6c94f26649b7855f8e8 into f88203b02c8041a9ba6668013b667da1(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:27:01,723 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:27:01,723 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8., storeName=b22602467dd4e6c94f26649b7855f8e8/C, priority=13, startTime=1734020821470; duration=0sec 2024-12-12T16:27:01,723 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:01,723 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b22602467dd4e6c94f26649b7855f8e8:C 2024-12-12T16:27:01,865 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:01,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35756 deadline: 1734020881864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:01,866 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:01,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35734 deadline: 1734020881865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:01,872 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:01,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35722 deadline: 1734020881871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:01,873 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:01,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020881871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:01,947 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/cd920d4432ad4f23b101bdb2b2ea6d46 2024-12-12T16:27:01,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/245c9293a1a24a8fb9ccb74f5897fad7 is 50, key is test_row_0/B:col10/1734020820864/Put/seqid=0 2024-12-12T16:27:01,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741873_1049 (size=12151) 2024-12-12T16:27:01,992 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/245c9293a1a24a8fb9ccb74f5897fad7 2024-12-12T16:27:02,010 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/c5f0835582924b2e8ffabb3fadb57481 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/c5f0835582924b2e8ffabb3fadb57481 2024-12-12T16:27:02,024 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b22602467dd4e6c94f26649b7855f8e8/A of b22602467dd4e6c94f26649b7855f8e8 into 
c5f0835582924b2e8ffabb3fadb57481(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:27:02,024 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:27:02,024 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8., storeName=b22602467dd4e6c94f26649b7855f8e8/A, priority=13, startTime=1734020821469; duration=0sec 2024-12-12T16:27:02,024 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:02,024 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b22602467dd4e6c94f26649b7855f8e8:A 2024-12-12T16:27:02,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/985344ea826143768369a31f786e205b is 50, key is test_row_0/C:col10/1734020820864/Put/seqid=0 2024-12-12T16:27:02,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741874_1050 (size=12151) 2024-12-12T16:27:02,051 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/985344ea826143768369a31f786e205b 2024-12-12T16:27:02,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/cd920d4432ad4f23b101bdb2b2ea6d46 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/cd920d4432ad4f23b101bdb2b2ea6d46 2024-12-12T16:27:02,074 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/cd920d4432ad4f23b101bdb2b2ea6d46, entries=150, sequenceid=171, filesize=11.9 K 2024-12-12T16:27:02,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/245c9293a1a24a8fb9ccb74f5897fad7 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/245c9293a1a24a8fb9ccb74f5897fad7 
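Interleaved with these flush and compaction records, the RPC handlers keep rejecting writes with RegionTooBusyException: HRegion.checkResources blocks updates while the region's memstore sits above its blocking size, which is the per-region flush threshold multiplied by a block multiplier. The 512.0 K limit reported here implies this test runs with a deliberately tiny threshold; the actual values are not visible in this excerpt. A hedged sketch of the two settings involved, with illustrative numbers only:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative configuration only; the test's real settings are not part of this log.
public class SmallMemstoreConf {
  public static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    // Flush a region's memstore once it reaches 128 KB.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    // Reject writes with RegionTooBusyException once usage exceeds
    // multiplier x flush size, i.e. 512 KB with these example values.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    return conf;
  }
}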
2024-12-12T16:27:02,090 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/245c9293a1a24a8fb9ccb74f5897fad7, entries=150, sequenceid=171, filesize=11.9 K 2024-12-12T16:27:02,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/985344ea826143768369a31f786e205b as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/985344ea826143768369a31f786e205b 2024-12-12T16:27:02,102 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/985344ea826143768369a31f786e205b, entries=150, sequenceid=171, filesize=11.9 K 2024-12-12T16:27:02,104 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for b22602467dd4e6c94f26649b7855f8e8 in 633ms, sequenceid=171, compaction requested=false 2024-12-12T16:27:02,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:27:02,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 
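At this point the flush has landed ~53.67 KB on disk in 633 ms while roughly 154 KB of newer edits are already queued, so another flush is requested immediately below and the writers keep seeing RegionTooBusyException in the meantime; the client-side view of that appears further down as RpcRetryingCallerImpl's "tries=6, retries=16" entry. A minimal sketch of how a writer can ride this out on top of the client's built-in retries, assuming the standard Table API (row, family and qualifier names echo the log; the back-off policy is an arbitrary choice, not what AcidGuaranteesTestTool does):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long pauseMs = 100;
      for (int attempt = 0; attempt < 10; attempt++) {
        try {
          table.put(put); // the client already retries internally before giving up
          break;
        } catch (IOException busy) {
          // Typically RegionTooBusyException (possibly wrapped once the client's own
          // retries are exhausted): back off while the region flushes its memstore.
          Thread.sleep(pauseMs);
          pauseMs = Math.min(pauseMs * 2, 5_000);
        }
      }
    }
  }
}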
2024-12-12T16:27:02,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-12-12T16:27:02,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4106): Remote procedure done, pid=15 2024-12-12T16:27:02,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-12T16:27:02,110 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14 2024-12-12T16:27:02,110 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1070 sec 2024-12-12T16:27:02,114 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees in 1.1190 sec 2024-12-12T16:27:02,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on b22602467dd4e6c94f26649b7855f8e8 2024-12-12T16:27:02,174 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b22602467dd4e6c94f26649b7855f8e8 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-12-12T16:27:02,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=A 2024-12-12T16:27:02,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:02,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=B 2024-12-12T16:27:02,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:02,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=C 2024-12-12T16:27:02,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:02,185 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/69aaddf8a3ba47c39393f38c1c77eb3f is 50, key is test_row_0/A:col10/1734020822171/Put/seqid=0 2024-12-12T16:27:02,187 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:02,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35722 deadline: 1734020882182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:02,187 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:02,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35734 deadline: 1734020882183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:02,188 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:02,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020882184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:02,190 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:02,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35756 deadline: 1734020882188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:02,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741875_1051 (size=12151) 2024-12-12T16:27:02,290 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:02,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35722 deadline: 1734020882290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:02,292 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:02,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020882290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:02,293 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:02,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35734 deadline: 1734020882291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:02,295 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:02,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35756 deadline: 1734020882292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:02,327 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:02,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35766 deadline: 1734020882325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:02,329 DEBUG [Thread-153 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4164 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8., hostname=4f6a4780a2f6,41933,1734020809476, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T16:27:02,495 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:02,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35722 deadline: 1734020882495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:02,497 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:02,497 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:02,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35734 deadline: 1734020882496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:02,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020882496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:02,499 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:02,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35756 deadline: 1734020882497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:02,595 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/69aaddf8a3ba47c39393f38c1c77eb3f 2024-12-12T16:27:02,631 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/abdcc222e8664aa9aece339911e2dfa9 is 50, key is test_row_0/B:col10/1734020822171/Put/seqid=0 2024-12-12T16:27:02,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741876_1052 (size=12151) 2024-12-12T16:27:02,676 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/abdcc222e8664aa9aece339911e2dfa9 2024-12-12T16:27:02,704 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/d7b1d8a5e8b2419bb22b4e0e19dff7ab is 50, key is test_row_0/C:col10/1734020822171/Put/seqid=0 2024-12-12T16:27:02,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741877_1053 (size=12151) 2024-12-12T16:27:02,749 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/d7b1d8a5e8b2419bb22b4e0e19dff7ab 2024-12-12T16:27:02,763 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/69aaddf8a3ba47c39393f38c1c77eb3f as 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/69aaddf8a3ba47c39393f38c1c77eb3f 2024-12-12T16:27:02,773 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/69aaddf8a3ba47c39393f38c1c77eb3f, entries=150, sequenceid=202, filesize=11.9 K 2024-12-12T16:27:02,776 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/abdcc222e8664aa9aece339911e2dfa9 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/abdcc222e8664aa9aece339911e2dfa9 2024-12-12T16:27:02,787 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/abdcc222e8664aa9aece339911e2dfa9, entries=150, sequenceid=202, filesize=11.9 K 2024-12-12T16:27:02,789 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/d7b1d8a5e8b2419bb22b4e0e19dff7ab as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/d7b1d8a5e8b2419bb22b4e0e19dff7ab 2024-12-12T16:27:02,801 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:02,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020882800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:02,801 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/d7b1d8a5e8b2419bb22b4e0e19dff7ab, entries=150, sequenceid=202, filesize=11.9 K 2024-12-12T16:27:02,803 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for b22602467dd4e6c94f26649b7855f8e8 in 629ms, sequenceid=202, compaction requested=true 2024-12-12T16:27:02,803 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:27:02,803 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b22602467dd4e6c94f26649b7855f8e8:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T16:27:02,803 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:02,803 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:27:02,803 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:27:02,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b22602467dd4e6c94f26649b7855f8e8:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T16:27:02,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:02,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b22602467dd4e6c94f26649b7855f8e8:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T16:27:02,805 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:27:02,807 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 
files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:27:02,807 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): b22602467dd4e6c94f26649b7855f8e8/B is initiating minor compaction (all files) 2024-12-12T16:27:02,807 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b22602467dd4e6c94f26649b7855f8e8/B in TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:27:02,807 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/9cb383709d3745daa6e16060ff3d70db, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/245c9293a1a24a8fb9ccb74f5897fad7, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/abdcc222e8664aa9aece339911e2dfa9] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp, totalSize=35.9 K 2024-12-12T16:27:02,808 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:27:02,808 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): b22602467dd4e6c94f26649b7855f8e8/A is initiating minor compaction (all files) 2024-12-12T16:27:02,808 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b22602467dd4e6c94f26649b7855f8e8/A in TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 
2024-12-12T16:27:02,808 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/c5f0835582924b2e8ffabb3fadb57481, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/cd920d4432ad4f23b101bdb2b2ea6d46, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/69aaddf8a3ba47c39393f38c1c77eb3f] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp, totalSize=35.9 K
2024-12-12T16:27:02,809 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 9cb383709d3745daa6e16060ff3d70db, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1734020819691
2024-12-12T16:27:02,809 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting c5f0835582924b2e8ffabb3fadb57481, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1734020819691
2024-12-12T16:27:02,810 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 245c9293a1a24a8fb9ccb74f5897fad7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1734020820854
2024-12-12T16:27:02,810 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting cd920d4432ad4f23b101bdb2b2ea6d46, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1734020820854
2024-12-12T16:27:02,811 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting abdcc222e8664aa9aece339911e2dfa9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1734020821546
2024-12-12T16:27:02,812 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 69aaddf8a3ba47c39393f38c1c77eb3f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1734020821546
2024-12-12T16:27:02,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on b22602467dd4e6c94f26649b7855f8e8
2024-12-12T16:27:02,819 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b22602467dd4e6c94f26649b7855f8e8 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB
2024-12-12T16:27:02,822 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=A
2024-12-12T16:27:02,822 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-12T16:27:02,822 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=B
2024-12-12T16:27:02,824 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-12T16:27:02,824 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=C
2024-12-12T16:27:02,824 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-12T16:27:02,833 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b22602467dd4e6c94f26649b7855f8e8#A#compaction#39 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-12T16:27:02,834 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/8010a31ddda94e6bae5726db741304d1 is 50, key is test_row_0/A:col10/1734020822171/Put/seqid=0
2024-12-12T16:27:02,846 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b22602467dd4e6c94f26649b7855f8e8#B#compaction#40 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-12T16:27:02,847 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/6371696bf6e446d69448f2baa1dd93f8 is 50, key is test_row_0/B:col10/1734020822171/Put/seqid=0
2024-12-12T16:27:02,857 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/cde06486c0754b61914693e0c70ec54e is 50, key is test_row_0/A:col10/1734020822817/Put/seqid=0
2024-12-12T16:27:02,884 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:02,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35722 deadline: 1734020882877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:02,885 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:02,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35756 deadline: 1734020882882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:02,887 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:02,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35734 deadline: 1734020882884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:02,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741878_1054 (size=12595) 2024-12-12T16:27:02,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741879_1055 (size=12595) 2024-12-12T16:27:02,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741880_1056 (size=12151) 2024-12-12T16:27:02,923 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/cde06486c0754b61914693e0c70ec54e 2024-12-12T16:27:02,948 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/2b9588832f3c4d54a3e6dd9ac7c41e92 is 50, key is test_row_0/B:col10/1734020822817/Put/seqid=0 2024-12-12T16:27:02,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741881_1057 (size=12151) 2024-12-12T16:27:02,959 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/2b9588832f3c4d54a3e6dd9ac7c41e92 2024-12-12T16:27:02,983 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/171a72c4f6d04ed8a4d0da5dd1c5f2d0 is 50, key is test_row_0/C:col10/1734020822817/Put/seqid=0 2024-12-12T16:27:02,989 WARN 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:02,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35722 deadline: 1734020882986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:02,991 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:02,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35756 deadline: 1734020882988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:02,997 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:02,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35734 deadline: 1734020882995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:03,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741882_1058 (size=12151) 2024-12-12T16:27:03,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-12T16:27:03,110 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 14 completed 2024-12-12T16:27:03,113 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T16:27:03,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees 2024-12-12T16:27:03,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-12T16:27:03,122 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T16:27:03,124 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T16:27:03,124 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T16:27:03,199 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:03,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35722 deadline: 1734020883192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:03,203 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:03,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35756 deadline: 1734020883199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:03,203 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:03,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35734 deadline: 1734020883200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:03,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-12T16:27:03,277 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:03,278 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-12T16:27:03,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:27:03,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. as already flushing 2024-12-12T16:27:03,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:27:03,279 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:03,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:03,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:03,309 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:03,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020883309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:03,316 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/6371696bf6e446d69448f2baa1dd93f8 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/6371696bf6e446d69448f2baa1dd93f8 2024-12-12T16:27:03,319 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/8010a31ddda94e6bae5726db741304d1 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/8010a31ddda94e6bae5726db741304d1 2024-12-12T16:27:03,329 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b22602467dd4e6c94f26649b7855f8e8/A of b22602467dd4e6c94f26649b7855f8e8 into 8010a31ddda94e6bae5726db741304d1(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T16:27:03,329 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:27:03,329 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8., storeName=b22602467dd4e6c94f26649b7855f8e8/A, priority=13, startTime=1734020822803; duration=0sec 2024-12-12T16:27:03,329 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:27:03,329 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b22602467dd4e6c94f26649b7855f8e8:A 2024-12-12T16:27:03,329 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:27:03,330 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b22602467dd4e6c94f26649b7855f8e8/B of b22602467dd4e6c94f26649b7855f8e8 into 6371696bf6e446d69448f2baa1dd93f8(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:27:03,331 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:27:03,331 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8., storeName=b22602467dd4e6c94f26649b7855f8e8/B, priority=13, startTime=1734020822803; duration=0sec 2024-12-12T16:27:03,331 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:03,331 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b22602467dd4e6c94f26649b7855f8e8:B 2024-12-12T16:27:03,331 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:27:03,332 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): b22602467dd4e6c94f26649b7855f8e8/C is initiating minor compaction (all files) 2024-12-12T16:27:03,332 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b22602467dd4e6c94f26649b7855f8e8/C in TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 
2024-12-12T16:27:03,332 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/f88203b02c8041a9ba6668013b667da1, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/985344ea826143768369a31f786e205b, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/d7b1d8a5e8b2419bb22b4e0e19dff7ab] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp, totalSize=35.9 K 2024-12-12T16:27:03,332 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting f88203b02c8041a9ba6668013b667da1, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1734020819691 2024-12-12T16:27:03,333 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 985344ea826143768369a31f786e205b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1734020820854 2024-12-12T16:27:03,334 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting d7b1d8a5e8b2419bb22b4e0e19dff7ab, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1734020821546 2024-12-12T16:27:03,353 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b22602467dd4e6c94f26649b7855f8e8#C#compaction#44 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:27:03,353 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/efd99a9c6c2042ebbe4ee4ef07857423 is 50, key is test_row_0/C:col10/1734020822171/Put/seqid=0 2024-12-12T16:27:03,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741883_1059 (size=12595) 2024-12-12T16:27:03,398 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/efd99a9c6c2042ebbe4ee4ef07857423 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/efd99a9c6c2042ebbe4ee4ef07857423 2024-12-12T16:27:03,408 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/171a72c4f6d04ed8a4d0da5dd1c5f2d0 2024-12-12T16:27:03,414 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b22602467dd4e6c94f26649b7855f8e8/C of b22602467dd4e6c94f26649b7855f8e8 into efd99a9c6c2042ebbe4ee4ef07857423(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T16:27:03,414 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:27:03,414 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8., storeName=b22602467dd4e6c94f26649b7855f8e8/C, priority=13, startTime=1734020822804; duration=0sec 2024-12-12T16:27:03,415 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:03,415 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b22602467dd4e6c94f26649b7855f8e8:C 2024-12-12T16:27:03,417 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/cde06486c0754b61914693e0c70ec54e as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/cde06486c0754b61914693e0c70ec54e 2024-12-12T16:27:03,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-12T16:27:03,426 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/cde06486c0754b61914693e0c70ec54e, entries=150, sequenceid=215, filesize=11.9 K 2024-12-12T16:27:03,429 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/2b9588832f3c4d54a3e6dd9ac7c41e92 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/2b9588832f3c4d54a3e6dd9ac7c41e92 2024-12-12T16:27:03,433 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:03,433 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-12T16:27:03,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:27:03,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. as already flushing 2024-12-12T16:27:03,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 
2024-12-12T16:27:03,434 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:03,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:03,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:03,439 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/2b9588832f3c4d54a3e6dd9ac7c41e92, entries=150, sequenceid=215, filesize=11.9 K 2024-12-12T16:27:03,440 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/171a72c4f6d04ed8a4d0da5dd1c5f2d0 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/171a72c4f6d04ed8a4d0da5dd1c5f2d0 2024-12-12T16:27:03,448 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/171a72c4f6d04ed8a4d0da5dd1c5f2d0, entries=150, sequenceid=215, filesize=11.9 K 2024-12-12T16:27:03,450 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for b22602467dd4e6c94f26649b7855f8e8 in 632ms, sequenceid=215, compaction requested=false 2024-12-12T16:27:03,450 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:27:03,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on b22602467dd4e6c94f26649b7855f8e8 2024-12-12T16:27:03,504 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b22602467dd4e6c94f26649b7855f8e8 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-12T16:27:03,505 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=A 2024-12-12T16:27:03,506 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:03,506 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=B 2024-12-12T16:27:03,506 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:03,506 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=C 2024-12-12T16:27:03,506 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:03,514 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/cd0b575ee33b4879923196b38cec0a28 is 50, key is test_row_0/A:col10/1734020822882/Put/seqid=0 2024-12-12T16:27:03,533 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:03,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35756 deadline: 1734020883528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:03,534 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:03,534 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:03,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35722 deadline: 1734020883530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:03,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35734 deadline: 1734020883528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:03,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741884_1060 (size=14541) 2024-12-12T16:27:03,543 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=242 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/cd0b575ee33b4879923196b38cec0a28 2024-12-12T16:27:03,559 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/bce9270aa6e64f7a834a416978725dc9 is 50, key is test_row_0/B:col10/1734020822882/Put/seqid=0 2024-12-12T16:27:03,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741885_1061 (size=12151) 2024-12-12T16:27:03,579 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=242 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/bce9270aa6e64f7a834a416978725dc9 2024-12-12T16:27:03,592 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:03,592 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-12T16:27:03,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 
2024-12-12T16:27:03,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. as already flushing 2024-12-12T16:27:03,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:27:03,595 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:03,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:27:03,595 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/73b80e4e0bdb49359acb79613f23a904 is 50, key is test_row_0/C:col10/1734020822882/Put/seqid=0 2024-12-12T16:27:03,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:27:03,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741886_1062 (size=12151) 2024-12-12T16:27:03,631 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=242 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/73b80e4e0bdb49359acb79613f23a904 2024-12-12T16:27:03,638 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:03,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35756 deadline: 1734020883635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:03,639 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:03,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35734 deadline: 1734020883636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:03,640 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/cd0b575ee33b4879923196b38cec0a28 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/cd0b575ee33b4879923196b38cec0a28 2024-12-12T16:27:03,642 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:03,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35722 deadline: 1734020883640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:03,650 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/cd0b575ee33b4879923196b38cec0a28, entries=200, sequenceid=242, filesize=14.2 K 2024-12-12T16:27:03,653 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/bce9270aa6e64f7a834a416978725dc9 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/bce9270aa6e64f7a834a416978725dc9 2024-12-12T16:27:03,660 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/bce9270aa6e64f7a834a416978725dc9, entries=150, sequenceid=242, filesize=11.9 K 2024-12-12T16:27:03,663 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/73b80e4e0bdb49359acb79613f23a904 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/73b80e4e0bdb49359acb79613f23a904 2024-12-12T16:27:03,671 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/73b80e4e0bdb49359acb79613f23a904, entries=150, sequenceid=242, filesize=11.9 K 2024-12-12T16:27:03,673 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for b22602467dd4e6c94f26649b7855f8e8 in 168ms, sequenceid=242, compaction requested=true 2024-12-12T16:27:03,673 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:27:03,673 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b22602467dd4e6c94f26649b7855f8e8:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T16:27:03,673 
DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:03,673 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:27:03,673 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:27:03,674 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b22602467dd4e6c94f26649b7855f8e8:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T16:27:03,675 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:27:03,675 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): b22602467dd4e6c94f26649b7855f8e8/B is initiating minor compaction (all files) 2024-12-12T16:27:03,675 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b22602467dd4e6c94f26649b7855f8e8/B in TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:27:03,676 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/6371696bf6e446d69448f2baa1dd93f8, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/2b9588832f3c4d54a3e6dd9ac7c41e92, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/bce9270aa6e64f7a834a416978725dc9] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp, totalSize=36.0 K 2024-12-12T16:27:03,676 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39287 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:27:03,676 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): b22602467dd4e6c94f26649b7855f8e8/A is initiating minor compaction (all files) 2024-12-12T16:27:03,676 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b22602467dd4e6c94f26649b7855f8e8/A in TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 
2024-12-12T16:27:03,676 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/8010a31ddda94e6bae5726db741304d1, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/cde06486c0754b61914693e0c70ec54e, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/cd0b575ee33b4879923196b38cec0a28] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp, totalSize=38.4 K 2024-12-12T16:27:03,677 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 6371696bf6e446d69448f2baa1dd93f8, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1734020821546 2024-12-12T16:27:03,677 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8010a31ddda94e6bae5726db741304d1, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1734020821546 2024-12-12T16:27:03,677 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 2b9588832f3c4d54a3e6dd9ac7c41e92, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1734020822185 2024-12-12T16:27:03,678 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting cde06486c0754b61914693e0c70ec54e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1734020822185 2024-12-12T16:27:03,678 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting bce9270aa6e64f7a834a416978725dc9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=242, earliestPutTs=1734020822880 2024-12-12T16:27:03,679 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting cd0b575ee33b4879923196b38cec0a28, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=242, earliestPutTs=1734020822874 2024-12-12T16:27:03,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:03,682 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b22602467dd4e6c94f26649b7855f8e8:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T16:27:03,682 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:27:03,695 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b22602467dd4e6c94f26649b7855f8e8#B#compaction#48 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:27:03,697 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/84771d45202844c4b3356f4ec2e58959 is 50, key is test_row_0/B:col10/1734020822882/Put/seqid=0 2024-12-12T16:27:03,703 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b22602467dd4e6c94f26649b7855f8e8#A#compaction#49 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:27:03,703 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/68a6dc7a24324f10b36623bbc409ecdb is 50, key is test_row_0/A:col10/1734020822882/Put/seqid=0 2024-12-12T16:27:03,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-12T16:27:03,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741887_1063 (size=12697) 2024-12-12T16:27:03,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741888_1064 (size=12697) 2024-12-12T16:27:03,748 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:03,749 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-12T16:27:03,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 
2024-12-12T16:27:03,750 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2837): Flushing b22602467dd4e6c94f26649b7855f8e8 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-12T16:27:03,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=A 2024-12-12T16:27:03,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:03,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=B 2024-12-12T16:27:03,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:03,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=C 2024-12-12T16:27:03,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:03,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/ab18ee6b5e514f8ab84feea4f181d0d8 is 50, key is test_row_0/A:col10/1734020823527/Put/seqid=0 2024-12-12T16:27:03,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741889_1065 (size=12151) 2024-12-12T16:27:03,779 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=254 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/ab18ee6b5e514f8ab84feea4f181d0d8 2024-12-12T16:27:03,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/48bac41b55a7436d9e2e36dbc8ff1ba6 is 50, key is test_row_0/B:col10/1734020823527/Put/seqid=0 2024-12-12T16:27:03,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741890_1066 (size=12151) 2024-12-12T16:27:03,810 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=254 (bloomFilter=true), 
to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/48bac41b55a7436d9e2e36dbc8ff1ba6 2024-12-12T16:27:03,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/19dd418d05d64b99aa556954f8cc7147 is 50, key is test_row_0/C:col10/1734020823527/Put/seqid=0 2024-12-12T16:27:03,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on b22602467dd4e6c94f26649b7855f8e8 2024-12-12T16:27:03,846 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. as already flushing 2024-12-12T16:27:03,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741891_1067 (size=12151) 2024-12-12T16:27:03,888 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:03,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35756 deadline: 1734020883886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:03,888 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:03,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35734 deadline: 1734020883886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:03,889 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:03,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35722 deadline: 1734020883887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:03,991 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:03,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35756 deadline: 1734020883990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:03,992 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:03,992 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:03,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35734 deadline: 1734020883990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:03,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35722 deadline: 1734020883991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:04,143 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/84771d45202844c4b3356f4ec2e58959 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/84771d45202844c4b3356f4ec2e58959 2024-12-12T16:27:04,143 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/68a6dc7a24324f10b36623bbc409ecdb as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/68a6dc7a24324f10b36623bbc409ecdb 2024-12-12T16:27:04,157 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b22602467dd4e6c94f26649b7855f8e8/B of b22602467dd4e6c94f26649b7855f8e8 into 84771d45202844c4b3356f4ec2e58959(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T16:27:04,157 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:27:04,157 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8., storeName=b22602467dd4e6c94f26649b7855f8e8/B, priority=13, startTime=1734020823673; duration=0sec 2024-12-12T16:27:04,158 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:27:04,158 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b22602467dd4e6c94f26649b7855f8e8:B 2024-12-12T16:27:04,159 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:27:04,162 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:27:04,162 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): b22602467dd4e6c94f26649b7855f8e8/C is initiating minor compaction (all files) 2024-12-12T16:27:04,163 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b22602467dd4e6c94f26649b7855f8e8/C in TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:27:04,163 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/efd99a9c6c2042ebbe4ee4ef07857423, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/171a72c4f6d04ed8a4d0da5dd1c5f2d0, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/73b80e4e0bdb49359acb79613f23a904] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp, totalSize=36.0 K 2024-12-12T16:27:04,163 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b22602467dd4e6c94f26649b7855f8e8/A of b22602467dd4e6c94f26649b7855f8e8 into 68a6dc7a24324f10b36623bbc409ecdb(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T16:27:04,163 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:27:04,163 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8., storeName=b22602467dd4e6c94f26649b7855f8e8/A, priority=13, startTime=1734020823673; duration=0sec 2024-12-12T16:27:04,163 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:04,163 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b22602467dd4e6c94f26649b7855f8e8:A 2024-12-12T16:27:04,164 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting efd99a9c6c2042ebbe4ee4ef07857423, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1734020821546 2024-12-12T16:27:04,166 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 171a72c4f6d04ed8a4d0da5dd1c5f2d0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1734020822185 2024-12-12T16:27:04,166 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 73b80e4e0bdb49359acb79613f23a904, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=242, earliestPutTs=1734020822880 2024-12-12T16:27:04,182 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b22602467dd4e6c94f26649b7855f8e8#C#compaction#53 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:27:04,183 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/4389437e8e1d4fb187372f3582852ed9 is 50, key is test_row_0/C:col10/1734020822882/Put/seqid=0 2024-12-12T16:27:04,196 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:04,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35756 deadline: 1734020884194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:04,199 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:04,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35722 deadline: 1734020884195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:04,200 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:04,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35734 deadline: 1734020884195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:04,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741892_1068 (size=12697) 2024-12-12T16:27:04,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-12T16:27:04,225 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/4389437e8e1d4fb187372f3582852ed9 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/4389437e8e1d4fb187372f3582852ed9 2024-12-12T16:27:04,235 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b22602467dd4e6c94f26649b7855f8e8/C of b22602467dd4e6c94f26649b7855f8e8 into 4389437e8e1d4fb187372f3582852ed9(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T16:27:04,235 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:27:04,235 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8., storeName=b22602467dd4e6c94f26649b7855f8e8/C, priority=13, startTime=1734020823680; duration=0sec 2024-12-12T16:27:04,236 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:04,236 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b22602467dd4e6c94f26649b7855f8e8:C 2024-12-12T16:27:04,275 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=254 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/19dd418d05d64b99aa556954f8cc7147 2024-12-12T16:27:04,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/ab18ee6b5e514f8ab84feea4f181d0d8 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/ab18ee6b5e514f8ab84feea4f181d0d8 2024-12-12T16:27:04,294 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/ab18ee6b5e514f8ab84feea4f181d0d8, entries=150, sequenceid=254, filesize=11.9 K 2024-12-12T16:27:04,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/48bac41b55a7436d9e2e36dbc8ff1ba6 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/48bac41b55a7436d9e2e36dbc8ff1ba6 2024-12-12T16:27:04,303 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/48bac41b55a7436d9e2e36dbc8ff1ba6, entries=150, sequenceid=254, filesize=11.9 K 2024-12-12T16:27:04,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/19dd418d05d64b99aa556954f8cc7147 as 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/19dd418d05d64b99aa556954f8cc7147 2024-12-12T16:27:04,316 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/19dd418d05d64b99aa556954f8cc7147, entries=150, sequenceid=254, filesize=11.9 K 2024-12-12T16:27:04,321 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for b22602467dd4e6c94f26649b7855f8e8 in 569ms, sequenceid=254, compaction requested=false 2024-12-12T16:27:04,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:27:04,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:27:04,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-12-12T16:27:04,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4106): Remote procedure done, pid=17 2024-12-12T16:27:04,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on b22602467dd4e6c94f26649b7855f8e8 2024-12-12T16:27:04,322 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b22602467dd4e6c94f26649b7855f8e8 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-12T16:27:04,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=A 2024-12-12T16:27:04,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:04,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=B 2024-12-12T16:27:04,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:04,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=C 2024-12-12T16:27:04,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:04,328 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16 2024-12-12T16:27:04,328 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1980 sec 2024-12-12T16:27:04,329 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/a8b6cc2125fc408e96b3c328b2d37d07 is 50, key is test_row_0/A:col10/1734020823886/Put/seqid=0 2024-12-12T16:27:04,332 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees in 1.2160 sec 2024-12-12T16:27:04,357 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:04,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020884355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:04,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741893_1069 (size=14741) 2024-12-12T16:27:04,460 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:04,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020884459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:04,500 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:04,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35756 deadline: 1734020884499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:04,503 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:04,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35722 deadline: 1734020884502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:04,505 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:04,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35734 deadline: 1734020884504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:04,663 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:04,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020884663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:04,760 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=282 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/a8b6cc2125fc408e96b3c328b2d37d07 2024-12-12T16:27:04,774 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/25b9f1aafc6d4dedb46f2773c50d9366 is 50, key is test_row_0/B:col10/1734020823886/Put/seqid=0 2024-12-12T16:27:04,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741894_1070 (size=12301) 2024-12-12T16:27:04,782 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=282 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/25b9f1aafc6d4dedb46f2773c50d9366 2024-12-12T16:27:04,802 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/74ee1764a6b242cabe98ee2f9f6d43e7 is 50, key is test_row_0/C:col10/1734020823886/Put/seqid=0 2024-12-12T16:27:04,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741895_1071 (size=12301) 2024-12-12T16:27:04,968 WARN 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:04,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020884967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:05,008 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:05,008 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:05,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35722 deadline: 1734020885007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:05,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35756 deadline: 1734020885006, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:05,012 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:05,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35734 deadline: 1734020885010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:05,215 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=282 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/74ee1764a6b242cabe98ee2f9f6d43e7 2024-12-12T16:27:05,222 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/a8b6cc2125fc408e96b3c328b2d37d07 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/a8b6cc2125fc408e96b3c328b2d37d07 2024-12-12T16:27:05,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-12T16:27:05,224 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 16 completed 2024-12-12T16:27:05,226 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T16:27:05,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees 2024-12-12T16:27:05,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-12T16:27:05,229 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T16:27:05,229 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/a8b6cc2125fc408e96b3c328b2d37d07, entries=200, sequenceid=282, filesize=14.4 K 2024-12-12T16:27:05,232 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, 
table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T16:27:05,232 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T16:27:05,232 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/25b9f1aafc6d4dedb46f2773c50d9366 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/25b9f1aafc6d4dedb46f2773c50d9366 2024-12-12T16:27:05,238 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/25b9f1aafc6d4dedb46f2773c50d9366, entries=150, sequenceid=282, filesize=12.0 K 2024-12-12T16:27:05,239 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/74ee1764a6b242cabe98ee2f9f6d43e7 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/74ee1764a6b242cabe98ee2f9f6d43e7 2024-12-12T16:27:05,248 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/74ee1764a6b242cabe98ee2f9f6d43e7, entries=150, sequenceid=282, filesize=12.0 K 2024-12-12T16:27:05,249 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for b22602467dd4e6c94f26649b7855f8e8 in 928ms, sequenceid=282, compaction requested=true 2024-12-12T16:27:05,249 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:27:05,250 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:27:05,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b22602467dd4e6c94f26649b7855f8e8:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T16:27:05,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:05,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b22602467dd4e6c94f26649b7855f8e8:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T16:27:05,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:05,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b22602467dd4e6c94f26649b7855f8e8:C, priority=-2147483648, current under compaction store size 
is 3 2024-12-12T16:27:05,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:27:05,250 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:27:05,252 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39589 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:27:05,252 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): b22602467dd4e6c94f26649b7855f8e8/A is initiating minor compaction (all files) 2024-12-12T16:27:05,252 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b22602467dd4e6c94f26649b7855f8e8/A in TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:27:05,252 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/68a6dc7a24324f10b36623bbc409ecdb, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/ab18ee6b5e514f8ab84feea4f181d0d8, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/a8b6cc2125fc408e96b3c328b2d37d07] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp, totalSize=38.7 K 2024-12-12T16:27:05,252 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:27:05,252 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): b22602467dd4e6c94f26649b7855f8e8/B is initiating minor compaction (all files) 2024-12-12T16:27:05,252 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b22602467dd4e6c94f26649b7855f8e8/B in TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 
2024-12-12T16:27:05,253 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/84771d45202844c4b3356f4ec2e58959, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/48bac41b55a7436d9e2e36dbc8ff1ba6, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/25b9f1aafc6d4dedb46f2773c50d9366] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp, totalSize=36.3 K 2024-12-12T16:27:05,253 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 68a6dc7a24324f10b36623bbc409ecdb, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=242, earliestPutTs=1734020822880 2024-12-12T16:27:05,254 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 84771d45202844c4b3356f4ec2e58959, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=242, earliestPutTs=1734020822880 2024-12-12T16:27:05,254 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting ab18ee6b5e514f8ab84feea4f181d0d8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=254, earliestPutTs=1734020823525 2024-12-12T16:27:05,254 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 48bac41b55a7436d9e2e36dbc8ff1ba6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=254, earliestPutTs=1734020823525 2024-12-12T16:27:05,255 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting a8b6cc2125fc408e96b3c328b2d37d07, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1734020823876 2024-12-12T16:27:05,255 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 25b9f1aafc6d4dedb46f2773c50d9366, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1734020823884 2024-12-12T16:27:05,286 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b22602467dd4e6c94f26649b7855f8e8#A#compaction#57 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:27:05,286 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/87086f51df6d4e09b8c125e5df76b423 is 50, key is test_row_0/A:col10/1734020823886/Put/seqid=0 2024-12-12T16:27:05,303 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b22602467dd4e6c94f26649b7855f8e8#B#compaction#58 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:27:05,304 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/71cd824f849d49e0bb4754520294b369 is 50, key is test_row_0/B:col10/1734020823886/Put/seqid=0 2024-12-12T16:27:05,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-12T16:27:05,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741897_1073 (size=12949) 2024-12-12T16:27:05,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741896_1072 (size=12949) 2024-12-12T16:27:05,363 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/71cd824f849d49e0bb4754520294b369 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/71cd824f849d49e0bb4754520294b369 2024-12-12T16:27:05,373 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b22602467dd4e6c94f26649b7855f8e8/B of b22602467dd4e6c94f26649b7855f8e8 into 71cd824f849d49e0bb4754520294b369(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
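Editor's note: at this point the compaction of family B has committed its single output file 71cd824f849d49e0bb4754520294b369 in place of the three inputs. The sketch below shows one way to inspect that store directory with the plain Hadoop FileSystem API; the HDFS path is copied from the log and is specific to this mini-cluster run, so treat it as a placeholder.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Lists the store files of column family B after the compaction above has
// committed its output. The path is environment-specific (taken from this
// test run's log) and only serves as an example.
public class ListStoreFilesSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path storeDir = new Path("hdfs://localhost:45065/user/jenkins/test-data/"
            + "033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/"
            + "b22602467dd4e6c94f26649b7855f8e8/B");
        FileSystem fs = storeDir.getFileSystem(conf);
        for (FileStatus st : fs.listStatus(storeDir)) {
            System.out.println(st.getPath().getName() + " " + st.getLen() + " bytes");
        }
    }
}
```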
2024-12-12T16:27:05,373 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:27:05,373 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8., storeName=b22602467dd4e6c94f26649b7855f8e8/B, priority=13, startTime=1734020825250; duration=0sec 2024-12-12T16:27:05,373 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:27:05,373 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b22602467dd4e6c94f26649b7855f8e8:B 2024-12-12T16:27:05,373 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:27:05,375 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:27:05,375 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): b22602467dd4e6c94f26649b7855f8e8/C is initiating minor compaction (all files) 2024-12-12T16:27:05,375 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b22602467dd4e6c94f26649b7855f8e8/C in TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:27:05,375 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/4389437e8e1d4fb187372f3582852ed9, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/19dd418d05d64b99aa556954f8cc7147, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/74ee1764a6b242cabe98ee2f9f6d43e7] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp, totalSize=36.3 K 2024-12-12T16:27:05,376 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 4389437e8e1d4fb187372f3582852ed9, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=242, earliestPutTs=1734020822880 2024-12-12T16:27:05,377 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 19dd418d05d64b99aa556954f8cc7147, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=254, earliestPutTs=1734020823525 2024-12-12T16:27:05,377 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 74ee1764a6b242cabe98ee2f9f6d43e7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1734020823884 2024-12-12T16:27:05,384 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 
4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:05,385 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-12T16:27:05,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:27:05,385 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing b22602467dd4e6c94f26649b7855f8e8 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T16:27:05,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=A 2024-12-12T16:27:05,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:05,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=B 2024-12-12T16:27:05,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:05,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=C 2024-12-12T16:27:05,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:05,398 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b22602467dd4e6c94f26649b7855f8e8#C#compaction#59 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:27:05,399 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/822f4eb2675d4721b5ee245f9394ee74 is 50, key is test_row_0/C:col10/1734020823886/Put/seqid=0 2024-12-12T16:27:05,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/6033680fb6eb44118d50b7844e7cc96f is 50, key is test_row_0/A:col10/1734020824353/Put/seqid=0 2024-12-12T16:27:05,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741898_1074 (size=12949) 2024-12-12T16:27:05,432 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/822f4eb2675d4721b5ee245f9394ee74 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/822f4eb2675d4721b5ee245f9394ee74 2024-12-12T16:27:05,446 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b22602467dd4e6c94f26649b7855f8e8/C of b22602467dd4e6c94f26649b7855f8e8 into 822f4eb2675d4721b5ee245f9394ee74(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:27:05,446 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:27:05,446 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8., storeName=b22602467dd4e6c94f26649b7855f8e8/C, priority=13, startTime=1734020825250; duration=0sec 2024-12-12T16:27:05,447 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:05,447 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b22602467dd4e6c94f26649b7855f8e8:C 2024-12-12T16:27:05,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741899_1075 (size=12301) 2024-12-12T16:27:05,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on b22602467dd4e6c94f26649b7855f8e8 2024-12-12T16:27:05,472 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 
as already flushing 2024-12-12T16:27:05,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-12T16:27:05,551 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:05,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020885549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:05,658 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:05,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020885658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:05,772 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/87086f51df6d4e09b8c125e5df76b423 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/87086f51df6d4e09b8c125e5df76b423 2024-12-12T16:27:05,785 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b22602467dd4e6c94f26649b7855f8e8/A of b22602467dd4e6c94f26649b7855f8e8 into 87086f51df6d4e09b8c125e5df76b423(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:27:05,785 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:27:05,785 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8., storeName=b22602467dd4e6c94f26649b7855f8e8/A, priority=13, startTime=1734020825250; duration=0sec 2024-12-12T16:27:05,786 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:05,786 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b22602467dd4e6c94f26649b7855f8e8:A 2024-12-12T16:27:05,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-12T16:27:05,860 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/6033680fb6eb44118d50b7844e7cc96f 2024-12-12T16:27:05,863 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:05,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020885861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:05,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/3f2018d854f84f6ba5211e756b71f572 is 50, key is test_row_0/B:col10/1734020824353/Put/seqid=0 2024-12-12T16:27:05,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741900_1076 (size=12301) 2024-12-12T16:27:06,011 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:06,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35722 deadline: 1734020886010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:06,016 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:06,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35734 deadline: 1734020886016, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:06,019 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:06,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35756 deadline: 1734020886018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:06,170 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:06,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020886169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:06,292 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/3f2018d854f84f6ba5211e756b71f572 2024-12-12T16:27:06,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/12ba351cab2f4ed8b80100f982669678 is 50, key is test_row_0/C:col10/1734020824353/Put/seqid=0 2024-12-12T16:27:06,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741901_1077 (size=12301) 2024-12-12T16:27:06,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-12T16:27:06,367 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:06,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35766 deadline: 1734020886366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:06,368 DEBUG [Thread-153 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8204 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8., hostname=4f6a4780a2f6,41933,1734020809476, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T16:27:06,676 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:06,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020886675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:06,711 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/12ba351cab2f4ed8b80100f982669678 2024-12-12T16:27:06,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/6033680fb6eb44118d50b7844e7cc96f as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/6033680fb6eb44118d50b7844e7cc96f 2024-12-12T16:27:06,729 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/6033680fb6eb44118d50b7844e7cc96f, entries=150, sequenceid=294, filesize=12.0 K 2024-12-12T16:27:06,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/3f2018d854f84f6ba5211e756b71f572 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/3f2018d854f84f6ba5211e756b71f572 2024-12-12T16:27:06,737 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/3f2018d854f84f6ba5211e756b71f572, entries=150, sequenceid=294, filesize=12.0 K 2024-12-12T16:27:06,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/12ba351cab2f4ed8b80100f982669678 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/12ba351cab2f4ed8b80100f982669678 2024-12-12T16:27:06,751 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/12ba351cab2f4ed8b80100f982669678, entries=150, sequenceid=294, filesize=12.0 K 2024-12-12T16:27:06,753 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for b22602467dd4e6c94f26649b7855f8e8 in 1367ms, sequenceid=294, compaction requested=false 2024-12-12T16:27:06,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:27:06,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 
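Editor's note: the long run of RegionTooBusyException warnings above is server-side memstore back-pressure: HRegion.checkResources rejects puts while the region sits over its 512 K limit (memstore flush size times the block multiplier) and the flush for pid=19 is still in flight, and the client (RpcRetryingCallerImpl, "tries=7, retries=16") backs off and retries until the flush completes. The sketch below shows how a writer in the style of the test's AtomicityWriter ends up on that path; the table, row, and family names follow the log, while the retry settings are illustrative assumptions.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch of a writer hitting the memstore back-pressure seen above.
// RegionTooBusyException is retried internally by the HBase client according
// to these settings; the values are illustrative, not the test's configuration.
public class BusyRegionWriterSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.client.retries.number", 16); // matches "retries=16" in the log
        conf.setLong("hbase.client.pause", 100);        // base backoff in ms (assumed)
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            table.put(put); // may be retried while the region is over its memstore limit
        }
    }
}
```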
2024-12-12T16:27:06,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-12-12T16:27:06,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-12-12T16:27:06,757 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18 2024-12-12T16:27:06,757 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5230 sec 2024-12-12T16:27:06,760 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees in 1.5330 sec 2024-12-12T16:27:07,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-12T16:27:07,335 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 18 completed 2024-12-12T16:27:07,337 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T16:27:07,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees 2024-12-12T16:27:07,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-12T16:27:07,339 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T16:27:07,340 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T16:27:07,340 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T16:27:07,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-12T16:27:07,492 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:07,493 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-12T16:27:07,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 
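Editor's note: the procedure entries above show the flush procedures for pid=18/19 finishing and the client immediately requesting another table flush (pid=20, "Client=jenkins//172.17.0.2 flush TestAcidGuarantees"). The sketch below is the kind of Admin call that produces that request; the connection setup is assumed, not taken from the test harness.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch of the client-side flush request that the master logs as
// "Client=jenkins//... flush TestAcidGuarantees"; assumes a default connection.
public class FlushTableSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // Flushes all regions of the table and waits for the master-side
            // FlushTableProcedure (pid=20 above) to report completion.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```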
2024-12-12T16:27:07,494 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing b22602467dd4e6c94f26649b7855f8e8 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-12T16:27:07,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=A 2024-12-12T16:27:07,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:07,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=B 2024-12-12T16:27:07,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:07,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=C 2024-12-12T16:27:07,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:07,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/ccb54049c9ff4ef9bd6e2d19cfd9c83e is 50, key is test_row_0/A:col10/1734020825541/Put/seqid=0 2024-12-12T16:27:07,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741902_1078 (size=12301) 2024-12-12T16:27:07,511 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=321 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/ccb54049c9ff4ef9bd6e2d19cfd9c83e 2024-12-12T16:27:07,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/ff8365cce99a4c31b33f7eb69e72ac7b is 50, key is test_row_0/B:col10/1734020825541/Put/seqid=0 2024-12-12T16:27:07,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741903_1079 (size=12301) 2024-12-12T16:27:07,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-12T16:27:07,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on b22602467dd4e6c94f26649b7855f8e8 2024-12-12T16:27:07,680 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. as already flushing 2024-12-12T16:27:07,708 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:07,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020887705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:07,812 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:07,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020887810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:07,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-12T16:27:07,942 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=321 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/ff8365cce99a4c31b33f7eb69e72ac7b 2024-12-12T16:27:07,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/caa336c852854041b2fa0072fab5a582 is 50, key is test_row_0/C:col10/1734020825541/Put/seqid=0 2024-12-12T16:27:07,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741904_1080 (size=12301) 2024-12-12T16:27:07,995 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=321 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/caa336c852854041b2fa0072fab5a582 2024-12-12T16:27:08,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/ccb54049c9ff4ef9bd6e2d19cfd9c83e as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/ccb54049c9ff4ef9bd6e2d19cfd9c83e 2024-12-12T16:27:08,017 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:08,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020888014, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:08,018 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/ccb54049c9ff4ef9bd6e2d19cfd9c83e, entries=150, sequenceid=321, filesize=12.0 K 2024-12-12T16:27:08,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/ff8365cce99a4c31b33f7eb69e72ac7b as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/ff8365cce99a4c31b33f7eb69e72ac7b 2024-12-12T16:27:08,025 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:08,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35722 deadline: 1734020888024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:08,026 DEBUG [Thread-151 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4138 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8., hostname=4f6a4780a2f6,41933,1734020809476, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T16:27:08,032 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/ff8365cce99a4c31b33f7eb69e72ac7b, entries=150, sequenceid=321, filesize=12.0 K 2024-12-12T16:27:08,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/caa336c852854041b2fa0072fab5a582 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/caa336c852854041b2fa0072fab5a582 2024-12-12T16:27:08,035 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:08,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35756 deadline: 1734020888034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:08,037 DEBUG [Thread-157 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4150 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at 
region=TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8., hostname=4f6a4780a2f6,41933,1734020809476, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T16:27:08,038 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:08,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35734 deadline: 1734020888037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:08,039 DEBUG [Thread-155 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4153 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at 
org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8., hostname=4f6a4780a2f6,41933,1734020809476, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T16:27:08,043 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/caa336c852854041b2fa0072fab5a582, entries=150, sequenceid=321, filesize=12.0 K 2024-12-12T16:27:08,045 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for b22602467dd4e6c94f26649b7855f8e8 in 550ms, sequenceid=321, compaction requested=true 2024-12-12T16:27:08,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:27:08,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 
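The repeated RegionTooBusyException entries above all report the same blocking threshold, "Over memstore limit=512.0 K". That limit is the product of the per-region flush size and the blocking multiplier checked in HRegion.checkResources. The sketch below is a minimal illustration only; it assumes hbase.hregion.memstore.flush.size of 128 KB with a multiplier of 4 to reproduce the 512 K figure, since the exact values used by the test harness are not shown in this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Assumed values, chosen only to reproduce the 512.0 K figure in the log above.
        long flushSize = 128L * 1024;   // hbase.hregion.memstore.flush.size
        long multiplier = 4L;           // hbase.hregion.memstore.block.multiplier
        conf.setLong("hbase.hregion.memstore.flush.size", flushSize);
        conf.setLong("hbase.hregion.memstore.block.multiplier", multiplier);

        // HRegion.checkResources rejects new mutations with RegionTooBusyException once the
        // region's memstore exceeds flushSize * multiplier, until a flush (like the 550 ms
        // flush to sequenceid=321 logged above) brings the memstore back under the limit.
        long blockingLimit = flushSize * multiplier;    // 524288 bytes = 512.0 K
        System.out.println("blocking memstore limit = " + blockingLimit + " bytes");
    }
}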
2024-12-12T16:27:08,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-12-12T16:27:08,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-12-12T16:27:08,051 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-12-12T16:27:08,051 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 707 msec 2024-12-12T16:27:08,053 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees in 714 msec 2024-12-12T16:27:08,325 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b22602467dd4e6c94f26649b7855f8e8 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-12T16:27:08,325 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=A 2024-12-12T16:27:08,325 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:08,325 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=B 2024-12-12T16:27:08,325 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:08,325 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=C 2024-12-12T16:27:08,325 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:08,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on b22602467dd4e6c94f26649b7855f8e8 2024-12-12T16:27:08,331 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/0e9e0ce1bf83498d9f0adc9bf039e9f5 is 50, key is test_row_0/A:col10/1734020827691/Put/seqid=0 2024-12-12T16:27:08,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741905_1081 (size=12301) 2024-12-12T16:27:08,343 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=333 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/0e9e0ce1bf83498d9f0adc9bf039e9f5 2024-12-12T16:27:08,359 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/f6a1ace9cd8346f6ac4ad5b6bd077939 is 50, key is test_row_0/B:col10/1734020827691/Put/seqid=0 2024-12-12T16:27:08,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741906_1082 
(size=12301) 2024-12-12T16:27:08,407 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=333 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/f6a1ace9cd8346f6ac4ad5b6bd077939 2024-12-12T16:27:08,422 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/22836a4a1b9746618b04bd66deddcef4 is 50, key is test_row_0/C:col10/1734020827691/Put/seqid=0 2024-12-12T16:27:08,430 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:08,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020888427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:08,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741907_1083 (size=12301) 2024-12-12T16:27:08,439 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=333 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/22836a4a1b9746618b04bd66deddcef4 2024-12-12T16:27:08,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-12T16:27:08,444 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 20 completed 2024-12-12T16:27:08,447 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/0e9e0ce1bf83498d9f0adc9bf039e9f5 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/0e9e0ce1bf83498d9f0adc9bf039e9f5 2024-12-12T16:27:08,449 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T16:27:08,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees 2024-12-12T16:27:08,452 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T16:27:08,452 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T16:27:08,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-12T16:27:08,453 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T16:27:08,457 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/0e9e0ce1bf83498d9f0adc9bf039e9f5, entries=150, sequenceid=333, filesize=12.0 K 2024-12-12T16:27:08,459 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/f6a1ace9cd8346f6ac4ad5b6bd077939 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/f6a1ace9cd8346f6ac4ad5b6bd077939 2024-12-12T16:27:08,468 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/f6a1ace9cd8346f6ac4ad5b6bd077939, entries=150, sequenceid=333, filesize=12.0 K 2024-12-12T16:27:08,469 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/22836a4a1b9746618b04bd66deddcef4 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/22836a4a1b9746618b04bd66deddcef4 2024-12-12T16:27:08,478 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/22836a4a1b9746618b04bd66deddcef4, entries=150, sequenceid=333, filesize=12.0 K 2024-12-12T16:27:08,480 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for b22602467dd4e6c94f26649b7855f8e8 in 156ms, sequenceid=333, compaction requested=true 2024-12-12T16:27:08,480 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:27:08,480 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T16:27:08,480 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b22602467dd4e6c94f26649b7855f8e8:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T16:27:08,481 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:08,481 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T16:27:08,481 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b22602467dd4e6c94f26649b7855f8e8:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T16:27:08,481 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:08,481 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b22602467dd4e6c94f26649b7855f8e8:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T16:27:08,481 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:27:08,482 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49852 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T16:27:08,482 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49852 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T16:27:08,483 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): b22602467dd4e6c94f26649b7855f8e8/B is initiating minor compaction (all files) 2024-12-12T16:27:08,483 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): b22602467dd4e6c94f26649b7855f8e8/A is initiating minor compaction (all files) 2024-12-12T16:27:08,483 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b22602467dd4e6c94f26649b7855f8e8/B in TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 
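The two compaction threads above each select all 4 eligible store files because the per-store file count has reached the minor-compaction minimum, and the "16 blocking" figure is the store-file count at which further flushes would stall. A rough sketch of the configuration knobs behind that selection follows; the values are illustrative assumptions, not settings read from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionSelectionKnobsSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Minimum/maximum number of store files considered for one minor compaction.
        conf.setInt("hbase.hstore.compaction.min", 3);
        conf.setInt("hbase.hstore.compaction.max", 10);

        // Size ratio used while "considering permutations" of candidate files,
        // as reported by ExploringCompactionPolicy above.
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);

        // Store-file count at which flushes are blocked ("16 blocking" in the log).
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);

        System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
    }
}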
2024-12-12T16:27:08,483 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b22602467dd4e6c94f26649b7855f8e8/A in TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:27:08,483 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/71cd824f849d49e0bb4754520294b369, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/3f2018d854f84f6ba5211e756b71f572, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/ff8365cce99a4c31b33f7eb69e72ac7b, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/f6a1ace9cd8346f6ac4ad5b6bd077939] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp, totalSize=48.7 K 2024-12-12T16:27:08,483 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/87086f51df6d4e09b8c125e5df76b423, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/6033680fb6eb44118d50b7844e7cc96f, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/ccb54049c9ff4ef9bd6e2d19cfd9c83e, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/0e9e0ce1bf83498d9f0adc9bf039e9f5] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp, totalSize=48.7 K 2024-12-12T16:27:08,484 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 87086f51df6d4e09b8c125e5df76b423, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1734020823884 2024-12-12T16:27:08,484 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 71cd824f849d49e0bb4754520294b369, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1734020823884 2024-12-12T16:27:08,485 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 3f2018d854f84f6ba5211e756b71f572, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1734020824324 2024-12-12T16:27:08,485 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6033680fb6eb44118d50b7844e7cc96f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1734020824324 2024-12-12T16:27:08,485 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting ccb54049c9ff4ef9bd6e2d19cfd9c83e, keycount=150, 
bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=321, earliestPutTs=1734020825541 2024-12-12T16:27:08,486 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting ff8365cce99a4c31b33f7eb69e72ac7b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=321, earliestPutTs=1734020825541 2024-12-12T16:27:08,486 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0e9e0ce1bf83498d9f0adc9bf039e9f5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1734020827691 2024-12-12T16:27:08,487 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting f6a1ace9cd8346f6ac4ad5b6bd077939, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1734020827691 2024-12-12T16:27:08,511 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b22602467dd4e6c94f26649b7855f8e8#A#compaction#69 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:27:08,512 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/bcecdb165b024f93a7bfcf84c67fb790 is 50, key is test_row_0/A:col10/1734020827691/Put/seqid=0 2024-12-12T16:27:08,514 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b22602467dd4e6c94f26649b7855f8e8#B#compaction#70 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:27:08,515 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/755176d4a0bd45a1b2636eb951e83d9e is 50, key is test_row_0/B:col10/1734020827691/Put/seqid=0 2024-12-12T16:27:08,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on b22602467dd4e6c94f26649b7855f8e8 2024-12-12T16:27:08,536 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b22602467dd4e6c94f26649b7855f8e8 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-12T16:27:08,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=A 2024-12-12T16:27:08,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:08,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=B 2024-12-12T16:27:08,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:08,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=C 2024-12-12T16:27:08,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:08,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741908_1084 (size=13085) 2024-12-12T16:27:08,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-12T16:27:08,554 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/bcecdb165b024f93a7bfcf84c67fb790 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/bcecdb165b024f93a7bfcf84c67fb790 2024-12-12T16:27:08,554 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/06f6a79deae6488e82cee6d07407818e is 50, key is test_row_0/A:col10/1734020828532/Put/seqid=0 2024-12-12T16:27:08,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741909_1085 (size=13085) 2024-12-12T16:27:08,564 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b22602467dd4e6c94f26649b7855f8e8/A of b22602467dd4e6c94f26649b7855f8e8 into bcecdb165b024f93a7bfcf84c67fb790(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
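Interleaved with these flushes and compactions, the client threads (Thread-151, Thread-155, Thread-157) keep retrying their puts through RpcRetryingCallerImpl until the region stops throwing RegionTooBusyException. Below is a minimal sketch of such a writer against this table, assuming the standard HBase 2.x client API; the retry settings are illustrative and not taken from the test configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryingWriterSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Knobs consulted by the client retry machinery; illustrative values only.
        conf.setInt("hbase.client.retries.number", 16);
        conf.setLong("hbase.client.pause", 100);   // base backoff in ms

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            // Column families A/B/C match the three stores flushed and compacted above.
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            // A RegionTooBusyException from the server is retried internally; the
            // "tries=6, retries=16, started=... ms ago" lines above are that retry loop logging.
            table.put(put);
        }
    }
}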
2024-12-12T16:27:08,564 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:27:08,564 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8., storeName=b22602467dd4e6c94f26649b7855f8e8/A, priority=12, startTime=1734020828480; duration=0sec 2024-12-12T16:27:08,564 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:27:08,565 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b22602467dd4e6c94f26649b7855f8e8:A 2024-12-12T16:27:08,565 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T16:27:08,569 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49852 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T16:27:08,569 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): b22602467dd4e6c94f26649b7855f8e8/C is initiating minor compaction (all files) 2024-12-12T16:27:08,570 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b22602467dd4e6c94f26649b7855f8e8/C in TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:27:08,570 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/822f4eb2675d4721b5ee245f9394ee74, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/12ba351cab2f4ed8b80100f982669678, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/caa336c852854041b2fa0072fab5a582, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/22836a4a1b9746618b04bd66deddcef4] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp, totalSize=48.7 K 2024-12-12T16:27:08,570 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 822f4eb2675d4721b5ee245f9394ee74, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1734020823884 2024-12-12T16:27:08,571 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 12ba351cab2f4ed8b80100f982669678, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1734020824324 2024-12-12T16:27:08,571 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting caa336c852854041b2fa0072fab5a582, keycount=150, bloomtype=ROW, size=12.0 K, 
encoding=NONE, compression=NONE, seqNum=321, earliestPutTs=1734020825541 2024-12-12T16:27:08,572 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 22836a4a1b9746618b04bd66deddcef4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1734020827691 2024-12-12T16:27:08,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741910_1086 (size=12301) 2024-12-12T16:27:08,584 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:08,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020888581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:08,589 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b22602467dd4e6c94f26649b7855f8e8#C#compaction#72 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:27:08,590 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/d95b33cf3ec7454ab27703aa5acb49ff is 50, key is test_row_0/C:col10/1734020827691/Put/seqid=0 2024-12-12T16:27:08,604 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:08,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741911_1087 (size=13085) 2024-12-12T16:27:08,605 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-12T16:27:08,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:27:08,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. as already flushing 2024-12-12T16:27:08,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:27:08,605 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:08,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:08,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
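The pid=23 entries trace a master-dispatched flush procedure that keeps being bounced back with "NOT flushing ... as already flushing" and re-queued until the memstore flush started at 16:27:08,536 drains. For context, a flush like this can also be requested from a client through the public Admin API; in recent HBase versions that request appears to be driven by a master-side procedure of this kind, though I am inferring that linkage from the log rather than stating it as fact. A minimal sketch (only the table name is taken from the log; connection details are assumptions):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestFlush {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Ask the cluster to flush the table's memstores. When a region is already
            // mid-flush, the server-side procedure reports failure and is re-dispatched
            // (the pid=23 cycle above) until the in-progress flush completes.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}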
2024-12-12T16:27:08,618 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/d95b33cf3ec7454ab27703aa5acb49ff as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/d95b33cf3ec7454ab27703aa5acb49ff 2024-12-12T16:27:08,631 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b22602467dd4e6c94f26649b7855f8e8/C of b22602467dd4e6c94f26649b7855f8e8 into d95b33cf3ec7454ab27703aa5acb49ff(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:27:08,631 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:27:08,631 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8., storeName=b22602467dd4e6c94f26649b7855f8e8/C, priority=12, startTime=1734020828481; duration=0sec 2024-12-12T16:27:08,631 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:08,631 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b22602467dd4e6c94f26649b7855f8e8:C 2024-12-12T16:27:08,686 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:08,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020888686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:08,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-12T16:27:08,759 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:08,760 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-12T16:27:08,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:27:08,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. as already flushing 2024-12-12T16:27:08,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:27:08,760 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
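The RegionTooBusyException warnings show writes being rejected in HRegion.checkResources once the region's memstore exceeds its 512.0 K blocking limit. The stock HBase client already retries this exception with backoff internally, so the loop below is only an illustrative sketch of what that handling amounts to; row, family, and qualifier names are taken from the log, while the value and the retry policy are assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPut {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            long backoffMs = 100;
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);
                    break; // write accepted
                } catch (RegionTooBusyException busy) {
                    // The memstore is over its blocking limit; give the flush (and the
                    // compactions above) time to drain before trying again.
                    // In practice the client library performs this retry itself; the
                    // explicit catch here is purely illustrative.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;
                }
            }
        }
    }
}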
2024-12-12T16:27:08,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:08,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:08,891 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:08,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020888889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:08,914 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:08,914 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-12T16:27:08,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:27:08,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. as already flushing 2024-12-12T16:27:08,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 
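The repeated "Over memstore limit=512.0 K" warnings all come from the same blocking check. As I understand the memstore sizing model (an assumption, not shown in this log), the blocking threshold is the per-region flush size multiplied by a block multiplier, so a test configured with a small flush size ends up at the 512 K figure seen here. A sketch of that arithmetic using the usual configuration keys and assumed defaults:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Assumed keys and defaults: per-region flush size (bytes) and blocking multiplier.
        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4);

        // Writes are rejected with RegionTooBusyException once the region's memstore
        // exceeds flushSize * multiplier. A flush size of 128 KB with a multiplier of 4
        // would give the 512 K limit reported in this log.
        long blockingLimit = flushSize * multiplier;
        System.out.println("memstore blocking limit = " + blockingLimit + " bytes");
    }
}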
2024-12-12T16:27:08,915 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:08,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:08,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:08,965 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/755176d4a0bd45a1b2636eb951e83d9e as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/755176d4a0bd45a1b2636eb951e83d9e 2024-12-12T16:27:08,978 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b22602467dd4e6c94f26649b7855f8e8/B of b22602467dd4e6c94f26649b7855f8e8 into 755176d4a0bd45a1b2636eb951e83d9e(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T16:27:08,978 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:27:08,978 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8., storeName=b22602467dd4e6c94f26649b7855f8e8/B, priority=12, startTime=1734020828481; duration=0sec 2024-12-12T16:27:08,978 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:08,978 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b22602467dd4e6c94f26649b7855f8e8:B 2024-12-12T16:27:08,983 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=358 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/06f6a79deae6488e82cee6d07407818e 2024-12-12T16:27:08,993 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/1a33477c16e44991b6552d46ff749ccf is 50, key is test_row_0/B:col10/1734020828532/Put/seqid=0 2024-12-12T16:27:08,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741912_1088 (size=12301) 2024-12-12T16:27:09,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-12T16:27:09,067 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:09,068 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-12T16:27:09,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:27:09,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. as already flushing 2024-12-12T16:27:09,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:27:09,069 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:09,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:09,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:09,196 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:09,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020889194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:09,222 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:09,222 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-12T16:27:09,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:27:09,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. as already flushing 2024-12-12T16:27:09,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:27:09,223 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:27:09,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:09,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:09,375 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:09,376 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-12T16:27:09,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:27:09,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. as already flushing 2024-12-12T16:27:09,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:27:09,376 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:09,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:09,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
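This stretch of the log is the TestAcidGuarantees workload writing the same row (test_row_0, qualifier col10) into all three column families A, B and C while flushes and compactions run underneath. The property being exercised is that a single Put spanning several families of one row is applied atomically, so readers should not observe a mix of old and new values across those families. A minimal sketch of such a write (the value and connection details are assumptions; the real test harness is more involved):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MultiFamilyPut {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            byte[] value = Bytes.toBytes("v1");
            Put put = new Put(Bytes.toBytes("test_row_0"));
            // One Put covering families A, B and C of the same row; HBase applies the
            // whole row mutation atomically, which is what this test verifies.
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), value);
            put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), value);
            put.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col10"), value);
            table.put(put);
        }
    }
}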
2024-12-12T16:27:09,399 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=358 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/1a33477c16e44991b6552d46ff749ccf 2024-12-12T16:27:09,415 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/5873d095ac1f4462beff1d52a498e8b3 is 50, key is test_row_0/C:col10/1734020828532/Put/seqid=0 2024-12-12T16:27:09,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741913_1089 (size=12301) 2024-12-12T16:27:09,448 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=358 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/5873d095ac1f4462beff1d52a498e8b3 2024-12-12T16:27:09,459 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/06f6a79deae6488e82cee6d07407818e as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/06f6a79deae6488e82cee6d07407818e 2024-12-12T16:27:09,469 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/06f6a79deae6488e82cee6d07407818e, entries=150, sequenceid=358, filesize=12.0 K 2024-12-12T16:27:09,471 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/1a33477c16e44991b6552d46ff749ccf as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/1a33477c16e44991b6552d46ff749ccf 2024-12-12T16:27:09,483 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/1a33477c16e44991b6552d46ff749ccf, entries=150, sequenceid=358, filesize=12.0 K 2024-12-12T16:27:09,485 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/5873d095ac1f4462beff1d52a498e8b3 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/5873d095ac1f4462beff1d52a498e8b3 2024-12-12T16:27:09,492 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/5873d095ac1f4462beff1d52a498e8b3, entries=150, sequenceid=358, filesize=12.0 K 2024-12-12T16:27:09,494 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for b22602467dd4e6c94f26649b7855f8e8 in 958ms, sequenceid=358, compaction requested=false 2024-12-12T16:27:09,494 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:27:09,528 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:09,529 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-12T16:27:09,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:27:09,530 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2837): Flushing b22602467dd4e6c94f26649b7855f8e8 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T16:27:09,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=A 2024-12-12T16:27:09,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:09,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=B 2024-12-12T16:27:09,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:09,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=C 2024-12-12T16:27:09,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:09,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/94cd97022f0e460095e6141e85433ba5 is 50, key is test_row_0/A:col10/1734020828565/Put/seqid=0 2024-12-12T16:27:09,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-12T16:27:09,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to 
blk_1073741914_1090 (size=12301) 2024-12-12T16:27:09,576 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=372 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/94cd97022f0e460095e6141e85433ba5 2024-12-12T16:27:09,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/b03a96bf12a54e0f840df8330fe2d5bf is 50, key is test_row_0/B:col10/1734020828565/Put/seqid=0 2024-12-12T16:27:09,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741915_1091 (size=12301) 2024-12-12T16:27:09,620 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=372 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/b03a96bf12a54e0f840df8330fe2d5bf 2024-12-12T16:27:09,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/890372f140c846c28453984931d7d3de is 50, key is test_row_0/C:col10/1734020828565/Put/seqid=0 2024-12-12T16:27:09,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741916_1092 (size=12301) 2024-12-12T16:27:09,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on b22602467dd4e6c94f26649b7855f8e8 2024-12-12T16:27:09,699 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. as already flushing 2024-12-12T16:27:09,771 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:09,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020889768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:09,876 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:09,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020889873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:10,051 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=372 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/890372f140c846c28453984931d7d3de 2024-12-12T16:27:10,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/94cd97022f0e460095e6141e85433ba5 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/94cd97022f0e460095e6141e85433ba5 2024-12-12T16:27:10,065 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/94cd97022f0e460095e6141e85433ba5, entries=150, sequenceid=372, filesize=12.0 K 2024-12-12T16:27:10,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/b03a96bf12a54e0f840df8330fe2d5bf as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/b03a96bf12a54e0f840df8330fe2d5bf 2024-12-12T16:27:10,072 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/b03a96bf12a54e0f840df8330fe2d5bf, entries=150, sequenceid=372, filesize=12.0 K 2024-12-12T16:27:10,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/890372f140c846c28453984931d7d3de as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/890372f140c846c28453984931d7d3de 2024-12-12T16:27:10,079 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:10,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020890078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:10,083 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/890372f140c846c28453984931d7d3de, entries=150, sequenceid=372, filesize=12.0 K 2024-12-12T16:27:10,085 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for b22602467dd4e6c94f26649b7855f8e8 in 555ms, sequenceid=372, compaction requested=true 2024-12-12T16:27:10,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2538): Flush status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:27:10,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 
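The repeated RegionTooBusyException warnings around this flush come from HRegion.checkResources blocking writes once the region's memstore passes its blocking limit, which is the per-region flush size times the block multiplier. A minimal sketch of settings consistent with the 512 K limit reported above follows; the numbers are assumptions, since the exact settings this test uses are not visible in the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    final class MemstoreLimitSketch {
      // Blocking limit = hbase.hregion.memstore.flush.size * hbase.hregion.memstore.block.multiplier.
      // 128 K x 4 = 512 K matches the "Over memstore limit=512.0 K" warnings, but other
      // combinations would too, so treat these values as illustrative only.
      static Configuration smallFlushConf() {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // assumed small test value
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // default multiplier
        return conf;
      }
    }

Once the flush completes and the memstore drains back under the limit, the blocked mutations are accepted again, which is the pattern the surrounding WARN/DEBUG pairs show.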
2024-12-12T16:27:10,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=23 2024-12-12T16:27:10,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4106): Remote procedure done, pid=23 2024-12-12T16:27:10,088 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=23, resume processing ppid=22 2024-12-12T16:27:10,088 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, ppid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6330 sec 2024-12-12T16:27:10,090 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees in 1.6400 sec 2024-12-12T16:27:10,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on b22602467dd4e6c94f26649b7855f8e8 2024-12-12T16:27:10,385 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b22602467dd4e6c94f26649b7855f8e8 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-12T16:27:10,385 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=A 2024-12-12T16:27:10,385 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:10,385 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=B 2024-12-12T16:27:10,385 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:10,385 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=C 2024-12-12T16:27:10,385 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:10,391 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/354f05f1d2ef40a1a5b3c860660b56f7 is 50, key is test_row_0/A:col10/1734020829714/Put/seqid=0 2024-12-12T16:27:10,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741917_1093 (size=14741) 2024-12-12T16:27:10,404 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=398 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/354f05f1d2ef40a1a5b3c860660b56f7 2024-12-12T16:27:10,414 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:10,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020890412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:10,419 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/c318490d103c4ce3a8c2b2e09c999b46 is 50, key is test_row_0/B:col10/1734020829714/Put/seqid=0 2024-12-12T16:27:10,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741918_1094 (size=12301) 2024-12-12T16:27:10,518 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:10,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020890516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:10,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-12T16:27:10,559 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 22 completed 2024-12-12T16:27:10,560 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T16:27:10,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees 2024-12-12T16:27:10,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-12T16:27:10,562 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T16:27:10,563 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T16:27:10,563 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T16:27:10,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-12T16:27:10,714 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:10,715 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-12T16:27:10,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 
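The "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" requests above (procId 22, then 24) are client-driven table flushes, and the repeated "Checking to see if procedure is done" records are the client polling for completion of the resulting FlushTableProcedure. A minimal way to issue one, assuming a Configuration that resolves this test cluster, is sketched below.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    final class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumed to point at the test cluster
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Blocks until the master-side flush procedure reports completion, which is what the
          // "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 22 completed"
          // record above corresponds to.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }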
2024-12-12T16:27:10,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. as already flushing 2024-12-12T16:27:10,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:27:10,715 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:10,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:27:10,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:10,722 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:10,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020890721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:10,830 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=398 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/c318490d103c4ce3a8c2b2e09c999b46 2024-12-12T16:27:10,844 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/f220cfb931344de5bed4dd64c710c7e0 is 50, key is test_row_0/C:col10/1734020829714/Put/seqid=0 2024-12-12T16:27:10,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741919_1095 (size=12301) 2024-12-12T16:27:10,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-12T16:27:10,868 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:10,868 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-12T16:27:10,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:27:10,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. as already flushing 2024-12-12T16:27:10,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 
2024-12-12T16:27:10,869 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:10,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:10,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:11,021 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:11,022 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-12T16:27:11,022 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:27:11,022 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. as already flushing 2024-12-12T16:27:11,022 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:27:11,022 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:11,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:11,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:11,026 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:11,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020891026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:11,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-12T16:27:11,176 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:11,176 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-12T16:27:11,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:27:11,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. as already flushing 2024-12-12T16:27:11,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:27:11,177 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
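Callers normally never see these RegionTooBusyException instances directly: the stock HBase client retries them internally and would only surface them wrapped once its retries are exhausted. Purely as an illustration of treating the condition as transient back-pressure, a hand-rolled retry with a hypothetical attempt cap and backoff could look like the sketch below (names and numbers are assumptions, not HBase defaults).

    import java.io.IOException;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;

    final class TooBusyRetrySketch {
      static void putWithBackoff(Table table, Put put) throws IOException, InterruptedException {
        int attempts = 0;
        while (true) {
          try {
            table.put(put);
            return;
          } catch (RegionTooBusyException e) {
            if (++attempts > 5) throw e;   // hypothetical cap
            Thread.sleep(100L * attempts); // simple linear backoff while the memstore drains
          }
        }
      }
    }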
2024-12-12T16:27:11,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:11,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:11,251 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=398 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/f220cfb931344de5bed4dd64c710c7e0 2024-12-12T16:27:11,261 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/354f05f1d2ef40a1a5b3c860660b56f7 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/354f05f1d2ef40a1a5b3c860660b56f7 2024-12-12T16:27:11,266 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/354f05f1d2ef40a1a5b3c860660b56f7, entries=200, sequenceid=398, filesize=14.4 K 2024-12-12T16:27:11,267 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/c318490d103c4ce3a8c2b2e09c999b46 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/c318490d103c4ce3a8c2b2e09c999b46 2024-12-12T16:27:11,273 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/c318490d103c4ce3a8c2b2e09c999b46, entries=150, sequenceid=398, filesize=12.0 K 2024-12-12T16:27:11,274 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/f220cfb931344de5bed4dd64c710c7e0 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/f220cfb931344de5bed4dd64c710c7e0 2024-12-12T16:27:11,281 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/f220cfb931344de5bed4dd64c710c7e0, entries=150, sequenceid=398, filesize=12.0 K 2024-12-12T16:27:11,282 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for b22602467dd4e6c94f26649b7855f8e8 in 898ms, sequenceid=398, compaction requested=true 2024-12-12T16:27:11,282 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:27:11,282 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
b22602467dd4e6c94f26649b7855f8e8:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T16:27:11,283 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:11,283 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T16:27:11,283 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T16:27:11,283 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b22602467dd4e6c94f26649b7855f8e8:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T16:27:11,283 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:11,283 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b22602467dd4e6c94f26649b7855f8e8:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T16:27:11,283 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:27:11,285 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49988 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T16:27:11,285 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): b22602467dd4e6c94f26649b7855f8e8/B is initiating minor compaction (all files) 2024-12-12T16:27:11,286 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b22602467dd4e6c94f26649b7855f8e8/B in TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 
2024-12-12T16:27:11,286 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/755176d4a0bd45a1b2636eb951e83d9e, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/1a33477c16e44991b6552d46ff749ccf, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/b03a96bf12a54e0f840df8330fe2d5bf, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/c318490d103c4ce3a8c2b2e09c999b46] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp, totalSize=48.8 K 2024-12-12T16:27:11,286 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 52428 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T16:27:11,286 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): b22602467dd4e6c94f26649b7855f8e8/A is initiating minor compaction (all files) 2024-12-12T16:27:11,286 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b22602467dd4e6c94f26649b7855f8e8/A in TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:27:11,286 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/bcecdb165b024f93a7bfcf84c67fb790, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/06f6a79deae6488e82cee6d07407818e, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/94cd97022f0e460095e6141e85433ba5, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/354f05f1d2ef40a1a5b3c860660b56f7] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp, totalSize=51.2 K 2024-12-12T16:27:11,287 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 755176d4a0bd45a1b2636eb951e83d9e, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1734020827691 2024-12-12T16:27:11,287 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting bcecdb165b024f93a7bfcf84c67fb790, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1734020827691 2024-12-12T16:27:11,288 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 1a33477c16e44991b6552d46ff749ccf, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=358, 
earliestPutTs=1734020828386 2024-12-12T16:27:11,288 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 06f6a79deae6488e82cee6d07407818e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=358, earliestPutTs=1734020828386 2024-12-12T16:27:11,288 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting b03a96bf12a54e0f840df8330fe2d5bf, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1734020828558 2024-12-12T16:27:11,289 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 94cd97022f0e460095e6141e85433ba5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1734020828558 2024-12-12T16:27:11,289 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting c318490d103c4ce3a8c2b2e09c999b46, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=398, earliestPutTs=1734020829714 2024-12-12T16:27:11,289 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 354f05f1d2ef40a1a5b3c860660b56f7, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=398, earliestPutTs=1734020829714 2024-12-12T16:27:11,314 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b22602467dd4e6c94f26649b7855f8e8#B#compaction#81 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:27:11,317 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/1e94d32aee964f00a5f9274fd927c1ae is 50, key is test_row_0/B:col10/1734020829714/Put/seqid=0 2024-12-12T16:27:11,319 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b22602467dd4e6c94f26649b7855f8e8#A#compaction#82 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:27:11,319 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/b8092130ab9e434d9e62764284809d38 is 50, key is test_row_0/A:col10/1734020829714/Put/seqid=0 2024-12-12T16:27:11,329 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:11,330 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-12T16:27:11,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 
2024-12-12T16:27:11,331 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2837): Flushing b22602467dd4e6c94f26649b7855f8e8 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-12T16:27:11,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=A 2024-12-12T16:27:11,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:11,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=B 2024-12-12T16:27:11,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:11,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=C 2024-12-12T16:27:11,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:11,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741920_1096 (size=13221) 2024-12-12T16:27:11,352 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/1e94d32aee964f00a5f9274fd927c1ae as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/1e94d32aee964f00a5f9274fd927c1ae 2024-12-12T16:27:11,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741921_1097 (size=13221) 2024-12-12T16:27:11,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/ea9c4f54519e4ddb96809922df365bfa is 50, key is test_row_0/A:col10/1734020830409/Put/seqid=0 2024-12-12T16:27:11,369 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/b8092130ab9e434d9e62764284809d38 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/b8092130ab9e434d9e62764284809d38 2024-12-12T16:27:11,377 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b22602467dd4e6c94f26649b7855f8e8/A of b22602467dd4e6c94f26649b7855f8e8 into 
b8092130ab9e434d9e62764284809d38(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:27:11,378 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:27:11,378 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8., storeName=b22602467dd4e6c94f26649b7855f8e8/A, priority=12, startTime=1734020831282; duration=0sec 2024-12-12T16:27:11,378 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:27:11,378 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b22602467dd4e6c94f26649b7855f8e8:A 2024-12-12T16:27:11,378 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T16:27:11,380 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b22602467dd4e6c94f26649b7855f8e8/B of b22602467dd4e6c94f26649b7855f8e8 into 1e94d32aee964f00a5f9274fd927c1ae(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:27:11,381 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:27:11,381 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8., storeName=b22602467dd4e6c94f26649b7855f8e8/B, priority=12, startTime=1734020831283; duration=0sec 2024-12-12T16:27:11,381 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:11,381 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b22602467dd4e6c94f26649b7855f8e8:B 2024-12-12T16:27:11,383 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49988 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T16:27:11,383 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): b22602467dd4e6c94f26649b7855f8e8/C is initiating minor compaction (all files) 2024-12-12T16:27:11,383 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b22602467dd4e6c94f26649b7855f8e8/C in TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 
2024-12-12T16:27:11,383 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/d95b33cf3ec7454ab27703aa5acb49ff, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/5873d095ac1f4462beff1d52a498e8b3, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/890372f140c846c28453984931d7d3de, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/f220cfb931344de5bed4dd64c710c7e0] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp, totalSize=48.8 K 2024-12-12T16:27:11,384 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting d95b33cf3ec7454ab27703aa5acb49ff, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1734020827691 2024-12-12T16:27:11,384 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5873d095ac1f4462beff1d52a498e8b3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=358, earliestPutTs=1734020828386 2024-12-12T16:27:11,385 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 890372f140c846c28453984931d7d3de, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1734020828558 2024-12-12T16:27:11,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741922_1098 (size=12301) 2024-12-12T16:27:11,388 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting f220cfb931344de5bed4dd64c710c7e0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=398, earliestPutTs=1734020829714 2024-12-12T16:27:11,413 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b22602467dd4e6c94f26649b7855f8e8#C#compaction#84 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:27:11,414 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/00a68d4586d34992ac400d502131ba84 is 50, key is test_row_0/C:col10/1734020829714/Put/seqid=0 2024-12-12T16:27:11,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741923_1099 (size=13221) 2024-12-12T16:27:11,456 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/00a68d4586d34992ac400d502131ba84 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/00a68d4586d34992ac400d502131ba84 2024-12-12T16:27:11,471 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b22602467dd4e6c94f26649b7855f8e8/C of b22602467dd4e6c94f26649b7855f8e8 into 00a68d4586d34992ac400d502131ba84(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:27:11,471 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:27:11,471 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8., storeName=b22602467dd4e6c94f26649b7855f8e8/C, priority=12, startTime=1734020831283; duration=0sec 2024-12-12T16:27:11,471 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:11,471 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b22602467dd4e6c94f26649b7855f8e8:C 2024-12-12T16:27:11,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on b22602467dd4e6c94f26649b7855f8e8 2024-12-12T16:27:11,536 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. as already flushing 2024-12-12T16:27:11,638 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:11,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 253 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020891637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:11,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-12T16:27:11,740 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:11,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 255 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020891739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:11,789 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=408 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/ea9c4f54519e4ddb96809922df365bfa 2024-12-12T16:27:11,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/f17fd6b2921d4ad3ad8815dc4792a40f is 50, key is test_row_0/B:col10/1734020830409/Put/seqid=0 2024-12-12T16:27:11,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741924_1100 (size=12301) 2024-12-12T16:27:11,831 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=408 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/f17fd6b2921d4ad3ad8815dc4792a40f 2024-12-12T16:27:11,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/f8508671e91640dea2245a5f43481eba is 50, key is test_row_0/C:col10/1734020830409/Put/seqid=0 2024-12-12T16:27:11,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741925_1101 (size=12301) 2024-12-12T16:27:11,876 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=408 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/f8508671e91640dea2245a5f43481eba 2024-12-12T16:27:11,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/ea9c4f54519e4ddb96809922df365bfa as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/ea9c4f54519e4ddb96809922df365bfa 2024-12-12T16:27:11,898 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/ea9c4f54519e4ddb96809922df365bfa, entries=150, sequenceid=408, filesize=12.0 K 2024-12-12T16:27:11,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/f17fd6b2921d4ad3ad8815dc4792a40f as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/f17fd6b2921d4ad3ad8815dc4792a40f 2024-12-12T16:27:11,908 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/f17fd6b2921d4ad3ad8815dc4792a40f, entries=150, sequenceid=408, filesize=12.0 K 2024-12-12T16:27:11,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/f8508671e91640dea2245a5f43481eba as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/f8508671e91640dea2245a5f43481eba 2024-12-12T16:27:11,918 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/f8508671e91640dea2245a5f43481eba, entries=150, sequenceid=408, filesize=12.0 K 2024-12-12T16:27:11,919 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for b22602467dd4e6c94f26649b7855f8e8 in 588ms, sequenceid=408, compaction requested=false 2024-12-12T16:27:11,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2538): Flush status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:27:11,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 
2024-12-12T16:27:11,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=25 2024-12-12T16:27:11,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4106): Remote procedure done, pid=25 2024-12-12T16:27:11,922 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=25, resume processing ppid=24 2024-12-12T16:27:11,922 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3570 sec 2024-12-12T16:27:11,923 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees in 1.3620 sec 2024-12-12T16:27:11,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on b22602467dd4e6c94f26649b7855f8e8 2024-12-12T16:27:11,944 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b22602467dd4e6c94f26649b7855f8e8 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-12T16:27:11,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=A 2024-12-12T16:27:11,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:11,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=B 2024-12-12T16:27:11,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:11,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=C 2024-12-12T16:27:11,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:11,950 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/b8c2eba3df9c40acab6aaf5d3a879424 is 50, key is test_row_0/A:col10/1734020831942/Put/seqid=0 2024-12-12T16:27:11,962 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:11,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 264 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020891961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:11,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741926_1102 (size=12301) 2024-12-12T16:27:11,969 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=438 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/b8c2eba3df9c40acab6aaf5d3a879424 2024-12-12T16:27:11,999 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/9473338fa4fc42e58616e16b253c9c0b is 50, key is test_row_0/B:col10/1734020831942/Put/seqid=0 2024-12-12T16:27:12,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741927_1103 (size=12301) 2024-12-12T16:27:12,008 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=438 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/9473338fa4fc42e58616e16b253c9c0b 2024-12-12T16:27:12,020 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/6cde229122c54aa7a0a62e3715f4562c is 50, key is test_row_0/C:col10/1734020831942/Put/seqid=0 2024-12-12T16:27:12,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741928_1104 (size=12301) 2024-12-12T16:27:12,035 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:12,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35722 deadline: 1734020892034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:12,037 DEBUG [Thread-151 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8149 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8., hostname=4f6a4780a2f6,41933,1734020809476, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T16:27:12,046 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:12,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35734 deadline: 1734020892044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:12,047 DEBUG [Thread-155 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8161 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8., hostname=4f6a4780a2f6,41933,1734020809476, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T16:27:12,051 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:12,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35756 deadline: 1734020892050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:12,052 DEBUG [Thread-157 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8166 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8., hostname=4f6a4780a2f6,41933,1734020809476, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T16:27:12,065 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:12,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 266 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020892064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:12,269 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:12,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 268 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020892267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:12,429 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=438 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/6cde229122c54aa7a0a62e3715f4562c 2024-12-12T16:27:12,436 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/b8c2eba3df9c40acab6aaf5d3a879424 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/b8c2eba3df9c40acab6aaf5d3a879424 2024-12-12T16:27:12,462 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/b8c2eba3df9c40acab6aaf5d3a879424, entries=150, sequenceid=438, filesize=12.0 K 2024-12-12T16:27:12,464 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/9473338fa4fc42e58616e16b253c9c0b as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/9473338fa4fc42e58616e16b253c9c0b 2024-12-12T16:27:12,477 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/9473338fa4fc42e58616e16b253c9c0b, entries=150, sequenceid=438, filesize=12.0 K 2024-12-12T16:27:12,479 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/6cde229122c54aa7a0a62e3715f4562c as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/6cde229122c54aa7a0a62e3715f4562c 2024-12-12T16:27:12,486 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/6cde229122c54aa7a0a62e3715f4562c, entries=150, sequenceid=438, filesize=12.0 K 2024-12-12T16:27:12,487 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for b22602467dd4e6c94f26649b7855f8e8 in 544ms, sequenceid=438, compaction requested=true 2024-12-12T16:27:12,488 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:27:12,488 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:27:12,488 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b22602467dd4e6c94f26649b7855f8e8:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T16:27:12,488 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:12,489 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:27:12,489 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b22602467dd4e6c94f26649b7855f8e8:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T16:27:12,489 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:12,489 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:27:12,490 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): b22602467dd4e6c94f26649b7855f8e8/A is initiating minor compaction (all files) 2024-12-12T16:27:12,490 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b22602467dd4e6c94f26649b7855f8e8/A in TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 
2024-12-12T16:27:12,490 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/b8092130ab9e434d9e62764284809d38, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/ea9c4f54519e4ddb96809922df365bfa, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/b8c2eba3df9c40acab6aaf5d3a879424] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp, totalSize=36.9 K 2024-12-12T16:27:12,490 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:27:12,490 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): b22602467dd4e6c94f26649b7855f8e8/B is initiating minor compaction (all files) 2024-12-12T16:27:12,490 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b22602467dd4e6c94f26649b7855f8e8:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T16:27:12,491 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b22602467dd4e6c94f26649b7855f8e8/B in TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:27:12,491 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:27:12,491 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/1e94d32aee964f00a5f9274fd927c1ae, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/f17fd6b2921d4ad3ad8815dc4792a40f, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/9473338fa4fc42e58616e16b253c9c0b] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp, totalSize=36.9 K 2024-12-12T16:27:12,491 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting b8092130ab9e434d9e62764284809d38, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=398, earliestPutTs=1734020829714 2024-12-12T16:27:12,491 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 1e94d32aee964f00a5f9274fd927c1ae, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=398, earliestPutTs=1734020829714 2024-12-12T16:27:12,491 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting ea9c4f54519e4ddb96809922df365bfa, keycount=150, bloomtype=ROW, size=12.0 K, 
encoding=NONE, compression=NONE, seqNum=408, earliestPutTs=1734020830397 2024-12-12T16:27:12,493 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting f17fd6b2921d4ad3ad8815dc4792a40f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=408, earliestPutTs=1734020830397 2024-12-12T16:27:12,493 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting b8c2eba3df9c40acab6aaf5d3a879424, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=438, earliestPutTs=1734020831608 2024-12-12T16:27:12,493 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 9473338fa4fc42e58616e16b253c9c0b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=438, earliestPutTs=1734020831608 2024-12-12T16:27:12,514 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b22602467dd4e6c94f26649b7855f8e8#B#compaction#90 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:27:12,514 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b22602467dd4e6c94f26649b7855f8e8#A#compaction#91 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:27:12,515 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/66a3416074824bbea7de508cb82abf43 is 50, key is test_row_0/B:col10/1734020831942/Put/seqid=0 2024-12-12T16:27:12,515 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/6e0a4bfb66ed434b89d0104cb12f336e is 50, key is test_row_0/A:col10/1734020831942/Put/seqid=0 2024-12-12T16:27:12,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741929_1105 (size=13323) 2024-12-12T16:27:12,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741930_1106 (size=13323) 2024-12-12T16:27:12,544 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/6e0a4bfb66ed434b89d0104cb12f336e as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/6e0a4bfb66ed434b89d0104cb12f336e 2024-12-12T16:27:12,554 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/66a3416074824bbea7de508cb82abf43 as 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/66a3416074824bbea7de508cb82abf43 2024-12-12T16:27:12,557 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b22602467dd4e6c94f26649b7855f8e8/A of b22602467dd4e6c94f26649b7855f8e8 into 6e0a4bfb66ed434b89d0104cb12f336e(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:27:12,557 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:27:12,558 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8., storeName=b22602467dd4e6c94f26649b7855f8e8/A, priority=13, startTime=1734020832488; duration=0sec 2024-12-12T16:27:12,558 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:27:12,558 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b22602467dd4e6c94f26649b7855f8e8:A 2024-12-12T16:27:12,558 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:27:12,559 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:27:12,560 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): b22602467dd4e6c94f26649b7855f8e8/C is initiating minor compaction (all files) 2024-12-12T16:27:12,560 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b22602467dd4e6c94f26649b7855f8e8/C in TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 
2024-12-12T16:27:12,560 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/00a68d4586d34992ac400d502131ba84, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/f8508671e91640dea2245a5f43481eba, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/6cde229122c54aa7a0a62e3715f4562c] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp, totalSize=36.9 K 2024-12-12T16:27:12,560 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 00a68d4586d34992ac400d502131ba84, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=398, earliestPutTs=1734020829714 2024-12-12T16:27:12,561 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting f8508671e91640dea2245a5f43481eba, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=408, earliestPutTs=1734020830397 2024-12-12T16:27:12,562 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6cde229122c54aa7a0a62e3715f4562c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=438, earliestPutTs=1734020831608 2024-12-12T16:27:12,562 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b22602467dd4e6c94f26649b7855f8e8/B of b22602467dd4e6c94f26649b7855f8e8 into 66a3416074824bbea7de508cb82abf43(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:27:12,562 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:27:12,562 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8., storeName=b22602467dd4e6c94f26649b7855f8e8/B, priority=13, startTime=1734020832488; duration=0sec 2024-12-12T16:27:12,562 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:12,562 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b22602467dd4e6c94f26649b7855f8e8:B 2024-12-12T16:27:12,573 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b22602467dd4e6c94f26649b7855f8e8#C#compaction#92 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:27:12,574 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/3d85892f00174e48a8d8cae8d491820b is 50, key is test_row_0/C:col10/1734020831942/Put/seqid=0 2024-12-12T16:27:12,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on b22602467dd4e6c94f26649b7855f8e8 2024-12-12T16:27:12,581 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b22602467dd4e6c94f26649b7855f8e8 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T16:27:12,581 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=A 2024-12-12T16:27:12,581 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:12,581 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=B 2024-12-12T16:27:12,582 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:12,582 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=C 2024-12-12T16:27:12,582 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:12,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741931_1107 (size=13323) 2024-12-12T16:27:12,602 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/bda905ac0fcd4dafbb1e919009168979 is 50, key is test_row_1/A:col10/1734020832578/Put/seqid=0 2024-12-12T16:27:12,609 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/3d85892f00174e48a8d8cae8d491820b as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/3d85892f00174e48a8d8cae8d491820b 2024-12-12T16:27:12,618 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b22602467dd4e6c94f26649b7855f8e8/C of b22602467dd4e6c94f26649b7855f8e8 into 3d85892f00174e48a8d8cae8d491820b(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T16:27:12,618 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:27:12,618 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8., storeName=b22602467dd4e6c94f26649b7855f8e8/C, priority=13, startTime=1734020832489; duration=0sec 2024-12-12T16:27:12,618 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:12,618 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b22602467dd4e6c94f26649b7855f8e8:C 2024-12-12T16:27:12,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741932_1108 (size=9857) 2024-12-12T16:27:12,625 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=451 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/bda905ac0fcd4dafbb1e919009168979 2024-12-12T16:27:12,638 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/7cbe4cee90b044a3b8f28af3b7ac0b9a is 50, key is test_row_1/B:col10/1734020832578/Put/seqid=0 2024-12-12T16:27:12,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741933_1109 (size=9857) 2024-12-12T16:27:12,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-12T16:27:12,667 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 24 completed 2024-12-12T16:27:12,668 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:12,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 294 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020892666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:12,669 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T16:27:12,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees 2024-12-12T16:27:12,672 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T16:27:12,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-12T16:27:12,673 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T16:27:12,674 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=26, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T16:27:12,773 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:12,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 296 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020892773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:12,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-12T16:27:12,826 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:12,826 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-12T16:27:12,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:27:12,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. as already flushing 2024-12-12T16:27:12,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:27:12,827 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:27:12,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:12,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:12,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-12T16:27:12,976 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:12,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 298 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020892975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:12,980 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:12,980 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-12T16:27:12,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:27:12,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. as already flushing 2024-12-12T16:27:12,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 
2024-12-12T16:27:12,981 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:12,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:12,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:13,058 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=451 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/7cbe4cee90b044a3b8f28af3b7ac0b9a 2024-12-12T16:27:13,073 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/5dc5e53e99594381879b9698793ed29e is 50, key is test_row_1/C:col10/1734020832578/Put/seqid=0 2024-12-12T16:27:13,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741934_1110 (size=9857) 2024-12-12T16:27:13,094 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=451 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/5dc5e53e99594381879b9698793ed29e 2024-12-12T16:27:13,101 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/bda905ac0fcd4dafbb1e919009168979 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/bda905ac0fcd4dafbb1e919009168979 2024-12-12T16:27:13,107 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/bda905ac0fcd4dafbb1e919009168979, entries=100, sequenceid=451, filesize=9.6 K 2024-12-12T16:27:13,110 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/7cbe4cee90b044a3b8f28af3b7ac0b9a as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/7cbe4cee90b044a3b8f28af3b7ac0b9a 2024-12-12T16:27:13,116 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/7cbe4cee90b044a3b8f28af3b7ac0b9a, entries=100, sequenceid=451, filesize=9.6 K 2024-12-12T16:27:13,117 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/5dc5e53e99594381879b9698793ed29e as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/5dc5e53e99594381879b9698793ed29e 2024-12-12T16:27:13,124 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/5dc5e53e99594381879b9698793ed29e, entries=100, sequenceid=451, filesize=9.6 K 2024-12-12T16:27:13,125 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for b22602467dd4e6c94f26649b7855f8e8 in 544ms, sequenceid=451, compaction requested=false 2024-12-12T16:27:13,125 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:27:13,133 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:13,134 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-12T16:27:13,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 
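For context on the procedure entries above: pid=26/27 are the server side of a client-initiated table flush (later in this log Thread-159 reports "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 26 completed" via HBaseAdmin). A minimal client-side sketch of that request, assuming a standard HBase 2.x client on the classpath; the class name and configuration are illustrative, only the table name is taken from the log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; the master runs this
      // as a FlushTableProcedure with one FlushRegionProcedure per region, as seen above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}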
2024-12-12T16:27:13,134 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2837): Flushing b22602467dd4e6c94f26649b7855f8e8 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-12T16:27:13,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=A 2024-12-12T16:27:13,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:13,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=B 2024-12-12T16:27:13,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:13,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=C 2024-12-12T16:27:13,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:13,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/4ece7494fa49474dbd5cd24655f62df7 is 50, key is test_row_0/A:col10/1734020832663/Put/seqid=0 2024-12-12T16:27:13,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741935_1111 (size=12301) 2024-12-12T16:27:13,188 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=477 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/4ece7494fa49474dbd5cd24655f62df7 2024-12-12T16:27:13,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/a1f3a29477e142f09bcb37f7678064ff is 50, key is test_row_0/B:col10/1734020832663/Put/seqid=0 2024-12-12T16:27:13,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741936_1112 (size=12301) 2024-12-12T16:27:13,235 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=477 (bloomFilter=true), 
to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/a1f3a29477e142f09bcb37f7678064ff 2024-12-12T16:27:13,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/e2e0a8ed8cfb48d3a04261599070d0fd is 50, key is test_row_0/C:col10/1734020832663/Put/seqid=0 2024-12-12T16:27:13,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741937_1113 (size=12301) 2024-12-12T16:27:13,268 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=477 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/e2e0a8ed8cfb48d3a04261599070d0fd 2024-12-12T16:27:13,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-12T16:27:13,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/4ece7494fa49474dbd5cd24655f62df7 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/4ece7494fa49474dbd5cd24655f62df7 2024-12-12T16:27:13,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on b22602467dd4e6c94f26649b7855f8e8 2024-12-12T16:27:13,283 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. as already flushing 2024-12-12T16:27:13,289 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/4ece7494fa49474dbd5cd24655f62df7, entries=150, sequenceid=477, filesize=12.0 K 2024-12-12T16:27:13,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/a1f3a29477e142f09bcb37f7678064ff as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/a1f3a29477e142f09bcb37f7678064ff 2024-12-12T16:27:13,308 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:13,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 308 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020893301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:13,308 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/a1f3a29477e142f09bcb37f7678064ff, entries=150, sequenceid=477, filesize=12.0 K 2024-12-12T16:27:13,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/e2e0a8ed8cfb48d3a04261599070d0fd as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/e2e0a8ed8cfb48d3a04261599070d0fd 2024-12-12T16:27:13,318 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/e2e0a8ed8cfb48d3a04261599070d0fd, entries=150, sequenceid=477, filesize=12.0 K 2024-12-12T16:27:13,319 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for b22602467dd4e6c94f26649b7855f8e8 in 184ms, sequenceid=477, compaction requested=true 2024-12-12T16:27:13,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2538): Flush status 
journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:27:13,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:27:13,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=27 2024-12-12T16:27:13,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4106): Remote procedure done, pid=27 2024-12-12T16:27:13,322 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=26 2024-12-12T16:27:13,322 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 646 msec 2024-12-12T16:27:13,324 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees in 654 msec 2024-12-12T16:27:13,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on b22602467dd4e6c94f26649b7855f8e8 2024-12-12T16:27:13,412 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b22602467dd4e6c94f26649b7855f8e8 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-12T16:27:13,412 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=A 2024-12-12T16:27:13,412 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:13,412 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=B 2024-12-12T16:27:13,412 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:13,412 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=C 2024-12-12T16:27:13,413 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:13,418 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/552c37fa55b343479ffd166a3393bdf1 is 50, key is test_row_0/A:col10/1734020833297/Put/seqid=0 2024-12-12T16:27:13,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741938_1114 (size=14741) 2024-12-12T16:27:13,432 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=489 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/552c37fa55b343479ffd166a3393bdf1 2024-12-12T16:27:13,450 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/7ebfa3b8601d48bb9e6b775b44f8dfb5 is 50, key is test_row_0/B:col10/1734020833297/Put/seqid=0 2024-12-12T16:27:13,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741939_1115 (size=12301) 2024-12-12T16:27:13,467 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=489 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/7ebfa3b8601d48bb9e6b775b44f8dfb5 2024-12-12T16:27:13,481 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/cd7492eb1a2d4228835c3ff0db1491cd is 50, key is test_row_0/C:col10/1734020833297/Put/seqid=0 2024-12-12T16:27:13,496 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:13,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 332 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020893494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:13,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741940_1116 (size=12301) 2024-12-12T16:27:13,598 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:13,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 334 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020893598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:13,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-12T16:27:13,777 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 26 completed 2024-12-12T16:27:13,778 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T16:27:13,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees 2024-12-12T16:27:13,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-12T16:27:13,780 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T16:27:13,781 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T16:27:13,781 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T16:27:13,803 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to 
exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:13,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 336 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020893801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:13,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-12T16:27:13,904 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=489 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/cd7492eb1a2d4228835c3ff0db1491cd 2024-12-12T16:27:13,910 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/552c37fa55b343479ffd166a3393bdf1 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/552c37fa55b343479ffd166a3393bdf1 2024-12-12T16:27:13,915 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/552c37fa55b343479ffd166a3393bdf1, entries=200, sequenceid=489, filesize=14.4 K 2024-12-12T16:27:13,916 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/7ebfa3b8601d48bb9e6b775b44f8dfb5 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/7ebfa3b8601d48bb9e6b775b44f8dfb5 
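The RegionTooBusyException entries above ("Over memstore limit=512.0 K") are rejections sent back to the writer while the region's memstore is over its blocking limit; the stock HBase client retries them internally. A rough sketch of a writer that widens those retries, assuming a standard HBase 2.x client; the retry values, row, and column are illustrative (the family/qualifier mirror the A:col10 keys visible in the log):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.client.retries.number", 20); // more attempts while the region is blocked
    conf.setLong("hbase.client.pause", 200L);       // base pause between attempts, in ms
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      table.put(put); // RegionTooBusyException is retried internally until the retries run out
    }
  }
}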
2024-12-12T16:27:13,925 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/7ebfa3b8601d48bb9e6b775b44f8dfb5, entries=150, sequenceid=489, filesize=12.0 K 2024-12-12T16:27:13,926 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/cd7492eb1a2d4228835c3ff0db1491cd as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/cd7492eb1a2d4228835c3ff0db1491cd 2024-12-12T16:27:13,933 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:13,934 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/cd7492eb1a2d4228835c3ff0db1491cd, entries=150, sequenceid=489, filesize=12.0 K 2024-12-12T16:27:13,934 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-12-12T16:27:13,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:27:13,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. as already flushing 2024-12-12T16:27:13,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:27:13,934 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:27:13,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:13,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:13,936 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for b22602467dd4e6c94f26649b7855f8e8 in 523ms, sequenceid=489, compaction requested=true 2024-12-12T16:27:13,936 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:27:13,936 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b22602467dd4e6c94f26649b7855f8e8:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T16:27:13,936 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:13,936 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T16:27:13,936 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b22602467dd4e6c94f26649b7855f8e8:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T16:27:13,936 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:13,936 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T16:27:13,936 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b22602467dd4e6c94f26649b7855f8e8:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T16:27:13,936 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:27:13,938 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47782 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T16:27:13,938 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): b22602467dd4e6c94f26649b7855f8e8/B is initiating minor compaction (all files) 2024-12-12T16:27:13,938 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b22602467dd4e6c94f26649b7855f8e8/B in TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 
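The compaction selection above (SortedCompactionPolicy seeing 4 store files, ExploringCompactionPolicy picking all 4 for a minor compaction) kicks in automatically once a store accumulates enough flushed files. A small sketch of the knobs that drive that decision, assuming the default exploring policy; the values shown are the usual defaults as I understand them, so verify them against the HBase version in use:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionSelectionKnobs {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);        // minimum eligible files before a minor compaction
    conf.setInt("hbase.hstore.compaction.max", 10);       // maximum files rewritten in one compaction
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2F); // size ratio used by the exploring policy
    System.out.println("compaction.min = " + conf.get("hbase.hstore.compaction.min"));
  }
}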
2024-12-12T16:27:13,938 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/66a3416074824bbea7de508cb82abf43, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/7cbe4cee90b044a3b8f28af3b7ac0b9a, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/a1f3a29477e142f09bcb37f7678064ff, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/7ebfa3b8601d48bb9e6b775b44f8dfb5] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp, totalSize=46.7 K 2024-12-12T16:27:13,939 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50222 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T16:27:13,939 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): b22602467dd4e6c94f26649b7855f8e8/A is initiating minor compaction (all files) 2024-12-12T16:27:13,939 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b22602467dd4e6c94f26649b7855f8e8/A in TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:27:13,939 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/6e0a4bfb66ed434b89d0104cb12f336e, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/bda905ac0fcd4dafbb1e919009168979, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/4ece7494fa49474dbd5cd24655f62df7, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/552c37fa55b343479ffd166a3393bdf1] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp, totalSize=49.0 K 2024-12-12T16:27:13,939 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 66a3416074824bbea7de508cb82abf43, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=438, earliestPutTs=1734020831608 2024-12-12T16:27:13,940 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6e0a4bfb66ed434b89d0104cb12f336e, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=438, earliestPutTs=1734020831608 2024-12-12T16:27:13,940 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 7cbe4cee90b044a3b8f28af3b7ac0b9a, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=451, 
earliestPutTs=1734020831960 2024-12-12T16:27:13,940 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting bda905ac0fcd4dafbb1e919009168979, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=451, earliestPutTs=1734020831960 2024-12-12T16:27:13,941 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4ece7494fa49474dbd5cd24655f62df7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=477, earliestPutTs=1734020832632 2024-12-12T16:27:13,941 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting a1f3a29477e142f09bcb37f7678064ff, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=477, earliestPutTs=1734020832632 2024-12-12T16:27:13,941 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 7ebfa3b8601d48bb9e6b775b44f8dfb5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=489, earliestPutTs=1734020833297 2024-12-12T16:27:13,941 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 552c37fa55b343479ffd166a3393bdf1, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=489, earliestPutTs=1734020833288 2024-12-12T16:27:13,959 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b22602467dd4e6c94f26649b7855f8e8#B#compaction#103 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:27:13,959 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b22602467dd4e6c94f26649b7855f8e8#A#compaction#102 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:27:13,960 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/56cc04b39c0e42e180d7e634becc7a70 is 50, key is test_row_0/A:col10/1734020833297/Put/seqid=0 2024-12-12T16:27:13,960 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/ea0b1eb6928a4769a7e01a4291ee8d14 is 50, key is test_row_0/B:col10/1734020833297/Put/seqid=0 2024-12-12T16:27:13,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741942_1118 (size=13459) 2024-12-12T16:27:13,985 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/ea0b1eb6928a4769a7e01a4291ee8d14 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/ea0b1eb6928a4769a7e01a4291ee8d14 2024-12-12T16:27:13,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741941_1117 (size=13459) 2024-12-12T16:27:13,994 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b22602467dd4e6c94f26649b7855f8e8/B of b22602467dd4e6c94f26649b7855f8e8 into ea0b1eb6928a4769a7e01a4291ee8d14(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
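The "average throughput ... total limit is 50.00 MB/second" lines above come from the pressure-aware throughput controller that paces compaction writes. The properties below are, to the best of my knowledge, the bounds that controller scales between as memstore pressure rises; treat the exact names and values as assumptions to check against your version rather than as facts from this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThroughputKnobs {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // The controller allows roughly lower.bound bytes/sec under no pressure
    // and ramps toward higher.bound as memstore pressure increases.
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    System.out.println(conf.get("hbase.hstore.compaction.throughput.lower.bound"));
  }
}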
2024-12-12T16:27:13,994 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:27:13,994 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8., storeName=b22602467dd4e6c94f26649b7855f8e8/B, priority=12, startTime=1734020833936; duration=0sec 2024-12-12T16:27:13,995 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:27:13,995 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/56cc04b39c0e42e180d7e634becc7a70 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/56cc04b39c0e42e180d7e634becc7a70 2024-12-12T16:27:13,995 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b22602467dd4e6c94f26649b7855f8e8:B 2024-12-12T16:27:13,995 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T16:27:13,997 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47782 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T16:27:13,997 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): b22602467dd4e6c94f26649b7855f8e8/C is initiating minor compaction (all files) 2024-12-12T16:27:13,997 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b22602467dd4e6c94f26649b7855f8e8/C in TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 
2024-12-12T16:27:13,997 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/3d85892f00174e48a8d8cae8d491820b, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/5dc5e53e99594381879b9698793ed29e, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/e2e0a8ed8cfb48d3a04261599070d0fd, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/cd7492eb1a2d4228835c3ff0db1491cd] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp, totalSize=46.7 K 2024-12-12T16:27:13,998 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 3d85892f00174e48a8d8cae8d491820b, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=438, earliestPutTs=1734020831608 2024-12-12T16:27:13,999 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 5dc5e53e99594381879b9698793ed29e, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=451, earliestPutTs=1734020831960 2024-12-12T16:27:14,000 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting e2e0a8ed8cfb48d3a04261599070d0fd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=477, earliestPutTs=1734020832632 2024-12-12T16:27:14,001 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting cd7492eb1a2d4228835c3ff0db1491cd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=489, earliestPutTs=1734020833297 2024-12-12T16:27:14,003 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b22602467dd4e6c94f26649b7855f8e8/A of b22602467dd4e6c94f26649b7855f8e8 into 56cc04b39c0e42e180d7e634becc7a70(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T16:27:14,003 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:27:14,003 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8., storeName=b22602467dd4e6c94f26649b7855f8e8/A, priority=12, startTime=1734020833936; duration=0sec 2024-12-12T16:27:14,003 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:14,003 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b22602467dd4e6c94f26649b7855f8e8:A 2024-12-12T16:27:14,025 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b22602467dd4e6c94f26649b7855f8e8#C#compaction#104 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:27:14,026 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/0436f2bbc74b4645886b9d75dc20351a is 50, key is test_row_0/C:col10/1734020833297/Put/seqid=0 2024-12-12T16:27:14,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741943_1119 (size=13459) 2024-12-12T16:27:14,045 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/0436f2bbc74b4645886b9d75dc20351a as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/0436f2bbc74b4645886b9d75dc20351a 2024-12-12T16:27:14,056 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b22602467dd4e6c94f26649b7855f8e8/C of b22602467dd4e6c94f26649b7855f8e8 into 0436f2bbc74b4645886b9d75dc20351a(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
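On the 512 K figure in the RegionTooBusyException warnings scattered through this stretch of the log: the per-region blocking limit is the memstore flush size multiplied by the block multiplier, so a limit this small points at a deliberately tiny flush size in the test configuration. A sketch of one way such a limit could be produced; the concrete numbers are illustrative guesses, not values read from this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024); // flush each region memstore at 128 KB
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // block writes at 4 x 128 KB = 512 KB
    System.out.println(conf.get("hbase.hregion.memstore.flush.size"));
  }
}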
2024-12-12T16:27:14,056 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:27:14,056 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8., storeName=b22602467dd4e6c94f26649b7855f8e8/C, priority=12, startTime=1734020833936; duration=0sec 2024-12-12T16:27:14,056 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:14,056 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b22602467dd4e6c94f26649b7855f8e8:C 2024-12-12T16:27:14,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-12T16:27:14,087 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:14,088 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-12-12T16:27:14,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:27:14,088 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2837): Flushing b22602467dd4e6c94f26649b7855f8e8 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-12T16:27:14,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=A 2024-12-12T16:27:14,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:14,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=B 2024-12-12T16:27:14,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:14,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=C 2024-12-12T16:27:14,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:14,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/99f217206aac4acab843257267ec59d2 is 50, key is test_row_0/A:col10/1734020833471/Put/seqid=0 2024-12-12T16:27:14,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on b22602467dd4e6c94f26649b7855f8e8 2024-12-12T16:27:14,107 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. as already flushing 2024-12-12T16:27:14,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741944_1120 (size=12301) 2024-12-12T16:27:14,132 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:14,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 347 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020894131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:14,235 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:14,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 349 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020894233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:14,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-12T16:27:14,438 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:14,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 351 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020894437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:14,516 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=516 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/99f217206aac4acab843257267ec59d2 2024-12-12T16:27:14,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/870f898b757e4ceb832b7482c7987fa8 is 50, key is test_row_0/B:col10/1734020833471/Put/seqid=0 2024-12-12T16:27:14,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741945_1121 (size=12301) 2024-12-12T16:27:14,743 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:14,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 353 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020894742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:14,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-12T16:27:14,936 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=516 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/870f898b757e4ceb832b7482c7987fa8 2024-12-12T16:27:14,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/ef5e2e2f23044c64adc3ed2d532f577b is 50, key is test_row_0/C:col10/1734020833471/Put/seqid=0 2024-12-12T16:27:14,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741946_1122 (size=12301) 2024-12-12T16:27:15,246 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:15,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 355 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020895245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:15,367 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=516 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/ef5e2e2f23044c64adc3ed2d532f577b 2024-12-12T16:27:15,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/99f217206aac4acab843257267ec59d2 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/99f217206aac4acab843257267ec59d2 2024-12-12T16:27:15,381 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/99f217206aac4acab843257267ec59d2, entries=150, sequenceid=516, filesize=12.0 K 2024-12-12T16:27:15,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/870f898b757e4ceb832b7482c7987fa8 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/870f898b757e4ceb832b7482c7987fa8 2024-12-12T16:27:15,391 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/870f898b757e4ceb832b7482c7987fa8, entries=150, sequenceid=516, filesize=12.0 K 2024-12-12T16:27:15,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/ef5e2e2f23044c64adc3ed2d532f577b as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/ef5e2e2f23044c64adc3ed2d532f577b 2024-12-12T16:27:15,399 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/ef5e2e2f23044c64adc3ed2d532f577b, entries=150, sequenceid=516, filesize=12.0 K 2024-12-12T16:27:15,400 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for b22602467dd4e6c94f26649b7855f8e8 in 1312ms, sequenceid=516, compaction requested=false 2024-12-12T16:27:15,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2538): Flush status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:27:15,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:27:15,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-12-12T16:27:15,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4106): Remote procedure done, pid=29 2024-12-12T16:27:15,403 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=29, resume processing ppid=28 2024-12-12T16:27:15,403 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, ppid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6210 sec 2024-12-12T16:27:15,405 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees in 1.6260 sec 2024-12-12T16:27:15,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-12T16:27:15,885 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 28 completed 2024-12-12T16:27:15,887 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T16:27:15,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees 2024-12-12T16:27:15,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-12T16:27:15,891 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE, 
locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T16:27:15,891 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T16:27:15,892 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T16:27:15,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-12T16:27:16,044 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:16,045 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-12-12T16:27:16,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:27:16,045 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2837): Flushing b22602467dd4e6c94f26649b7855f8e8 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-12T16:27:16,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=A 2024-12-12T16:27:16,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:16,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=B 2024-12-12T16:27:16,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:16,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=C 2024-12-12T16:27:16,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:16,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/85a7ae74e3cc494f922cec7afa048657 is 50, key is test_row_0/A:col10/1734020834130/Put/seqid=0 2024-12-12T16:27:16,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741947_1123 (size=12301) 
2024-12-12T16:27:16,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-12T16:27:16,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on b22602467dd4e6c94f26649b7855f8e8 2024-12-12T16:27:16,257 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. as already flushing 2024-12-12T16:27:16,297 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:16,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 378 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020896296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:16,400 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:16,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 380 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020896399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:16,454 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:16,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35766 deadline: 1734020896453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:16,455 DEBUG [Thread-153 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18290 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8., hostname=4f6a4780a2f6,41933,1734020809476, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T16:27:16,457 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=528 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/85a7ae74e3cc494f922cec7afa048657 2024-12-12T16:27:16,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/55adae08c1bd4aa59514d0a2422fd4c8 is 50, key is test_row_0/B:col10/1734020834130/Put/seqid=0 2024-12-12T16:27:16,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741948_1124 (size=12301) 2024-12-12T16:27:16,476 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=528 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/55adae08c1bd4aa59514d0a2422fd4c8 2024-12-12T16:27:16,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/bb5543e60c6f455ab71c699a30970f23 is 50, key is test_row_0/C:col10/1734020834130/Put/seqid=0 2024-12-12T16:27:16,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741949_1125 (size=12301) 2024-12-12T16:27:16,491 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=528 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/bb5543e60c6f455ab71c699a30970f23 2024-12-12T16:27:16,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-12T16:27:16,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/85a7ae74e3cc494f922cec7afa048657 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/85a7ae74e3cc494f922cec7afa048657 2024-12-12T16:27:16,503 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/85a7ae74e3cc494f922cec7afa048657, entries=150, sequenceid=528, filesize=12.0 K 2024-12-12T16:27:16,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/55adae08c1bd4aa59514d0a2422fd4c8 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/55adae08c1bd4aa59514d0a2422fd4c8 2024-12-12T16:27:16,511 INFO 
[RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/55adae08c1bd4aa59514d0a2422fd4c8, entries=150, sequenceid=528, filesize=12.0 K 2024-12-12T16:27:16,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/bb5543e60c6f455ab71c699a30970f23 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/bb5543e60c6f455ab71c699a30970f23 2024-12-12T16:27:16,520 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/bb5543e60c6f455ab71c699a30970f23, entries=150, sequenceid=528, filesize=12.0 K 2024-12-12T16:27:16,521 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for b22602467dd4e6c94f26649b7855f8e8 in 476ms, sequenceid=528, compaction requested=true 2024-12-12T16:27:16,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2538): Flush status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:27:16,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 
2024-12-12T16:27:16,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=31 2024-12-12T16:27:16,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4106): Remote procedure done, pid=31 2024-12-12T16:27:16,525 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=31, resume processing ppid=30 2024-12-12T16:27:16,525 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, ppid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 632 msec 2024-12-12T16:27:16,527 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees in 639 msec 2024-12-12T16:27:16,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on b22602467dd4e6c94f26649b7855f8e8 2024-12-12T16:27:16,605 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b22602467dd4e6c94f26649b7855f8e8 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-12T16:27:16,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=A 2024-12-12T16:27:16,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:16,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=B 2024-12-12T16:27:16,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:16,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=C 2024-12-12T16:27:16,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:16,611 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/26e5133e4cf24ebdac8b8a8c21e01cb9 is 50, key is test_row_0/A:col10/1734020836295/Put/seqid=0 2024-12-12T16:27:16,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741950_1126 (size=14741) 2024-12-12T16:27:16,620 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=553 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/26e5133e4cf24ebdac8b8a8c21e01cb9 2024-12-12T16:27:16,633 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/c9b56d1c32bb4c24876e58d57600c07e is 50, key is test_row_0/B:col10/1734020836295/Put/seqid=0 2024-12-12T16:27:16,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741951_1127 
(size=12301) 2024-12-12T16:27:16,643 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=553 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/c9b56d1c32bb4c24876e58d57600c07e 2024-12-12T16:27:16,646 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:16,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 391 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020896644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:16,656 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/6a8092cf167946c0bd0079ed098b2d36 is 50, key is test_row_0/C:col10/1734020836295/Put/seqid=0 2024-12-12T16:27:16,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741952_1128 (size=12301) 2024-12-12T16:27:16,753 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:16,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 393 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020896753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:16,859 DEBUG [Thread-162 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x490457fd to 127.0.0.1:52684 2024-12-12T16:27:16,859 DEBUG [Thread-160 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x72e97e4b to 127.0.0.1:52684 2024-12-12T16:27:16,859 DEBUG [Thread-162 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:27:16,859 DEBUG [Thread-160 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:27:16,859 DEBUG [Thread-166 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6f6b07e3 to 127.0.0.1:52684 2024-12-12T16:27:16,859 DEBUG [Thread-166 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:27:16,866 DEBUG [Thread-164 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2c8de680 to 127.0.0.1:52684 2024-12-12T16:27:16,866 DEBUG [Thread-164 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:27:16,956 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:16,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 395 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35762 deadline: 1734020896955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:16,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-12T16:27:16,993 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 30 completed 2024-12-12T16:27:17,063 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=553 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/6a8092cf167946c0bd0079ed098b2d36 2024-12-12T16:27:17,069 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/26e5133e4cf24ebdac8b8a8c21e01cb9 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/26e5133e4cf24ebdac8b8a8c21e01cb9 2024-12-12T16:27:17,073 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/26e5133e4cf24ebdac8b8a8c21e01cb9, entries=200, sequenceid=553, filesize=14.4 K 2024-12-12T16:27:17,075 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/c9b56d1c32bb4c24876e58d57600c07e as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/c9b56d1c32bb4c24876e58d57600c07e 2024-12-12T16:27:17,079 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/c9b56d1c32bb4c24876e58d57600c07e, entries=150, sequenceid=553, filesize=12.0 K 2024-12-12T16:27:17,080 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/6a8092cf167946c0bd0079ed098b2d36 as 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/6a8092cf167946c0bd0079ed098b2d36 2024-12-12T16:27:17,085 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/6a8092cf167946c0bd0079ed098b2d36, entries=150, sequenceid=553, filesize=12.0 K 2024-12-12T16:27:17,086 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for b22602467dd4e6c94f26649b7855f8e8 in 480ms, sequenceid=553, compaction requested=true 2024-12-12T16:27:17,086 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:27:17,086 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b22602467dd4e6c94f26649b7855f8e8:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T16:27:17,086 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T16:27:17,086 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:17,086 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b22602467dd4e6c94f26649b7855f8e8:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T16:27:17,086 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:17,086 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T16:27:17,086 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b22602467dd4e6c94f26649b7855f8e8:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T16:27:17,086 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:27:17,087 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 52802 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T16:27:17,087 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): b22602467dd4e6c94f26649b7855f8e8/A is initiating minor compaction (all files) 2024-12-12T16:27:17,087 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50362 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T16:27:17,088 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b22602467dd4e6c94f26649b7855f8e8/A in TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 
2024-12-12T16:27:17,088 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): b22602467dd4e6c94f26649b7855f8e8/B is initiating minor compaction (all files) 2024-12-12T16:27:17,088 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/56cc04b39c0e42e180d7e634becc7a70, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/99f217206aac4acab843257267ec59d2, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/85a7ae74e3cc494f922cec7afa048657, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/26e5133e4cf24ebdac8b8a8c21e01cb9] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp, totalSize=51.6 K 2024-12-12T16:27:17,088 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b22602467dd4e6c94f26649b7855f8e8/B in TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:27:17,088 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/ea0b1eb6928a4769a7e01a4291ee8d14, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/870f898b757e4ceb832b7482c7987fa8, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/55adae08c1bd4aa59514d0a2422fd4c8, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/c9b56d1c32bb4c24876e58d57600c07e] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp, totalSize=49.2 K 2024-12-12T16:27:17,088 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 56cc04b39c0e42e180d7e634becc7a70, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=489, earliestPutTs=1734020833297 2024-12-12T16:27:17,088 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting ea0b1eb6928a4769a7e01a4291ee8d14, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=489, earliestPutTs=1734020833297 2024-12-12T16:27:17,088 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 99f217206aac4acab843257267ec59d2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=516, earliestPutTs=1734020833471 2024-12-12T16:27:17,089 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 85a7ae74e3cc494f922cec7afa048657, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=528, 
earliestPutTs=1734020834121 2024-12-12T16:27:17,089 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 870f898b757e4ceb832b7482c7987fa8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=516, earliestPutTs=1734020833471 2024-12-12T16:27:17,089 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 26e5133e4cf24ebdac8b8a8c21e01cb9, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=553, earliestPutTs=1734020836289 2024-12-12T16:27:17,089 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 55adae08c1bd4aa59514d0a2422fd4c8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=528, earliestPutTs=1734020834121 2024-12-12T16:27:17,090 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting c9b56d1c32bb4c24876e58d57600c07e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=553, earliestPutTs=1734020836289 2024-12-12T16:27:17,099 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b22602467dd4e6c94f26649b7855f8e8#B#compaction#114 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:27:17,099 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b22602467dd4e6c94f26649b7855f8e8#A#compaction#115 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:27:17,100 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/f87c0c2e3d874091893efd7ed37d8753 is 50, key is test_row_0/B:col10/1734020836295/Put/seqid=0 2024-12-12T16:27:17,100 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/cfa98ce2ea7c4753b9211b0845a9c9c3 is 50, key is test_row_0/A:col10/1734020836295/Put/seqid=0 2024-12-12T16:27:17,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741954_1130 (size=13595) 2024-12-12T16:27:17,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741953_1129 (size=13595) 2024-12-12T16:27:17,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on b22602467dd4e6c94f26649b7855f8e8 2024-12-12T16:27:17,261 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b22602467dd4e6c94f26649b7855f8e8 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-12T16:27:17,261 DEBUG [Thread-149 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x12885408 to 127.0.0.1:52684 2024-12-12T16:27:17,261 DEBUG [Thread-149 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:27:17,261 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
b22602467dd4e6c94f26649b7855f8e8, store=A 2024-12-12T16:27:17,262 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:17,262 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=B 2024-12-12T16:27:17,262 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:17,262 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=C 2024-12-12T16:27:17,262 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:17,266 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/2d595d55db394cd6a224a95c4a484a90 is 50, key is test_row_0/A:col10/1734020836637/Put/seqid=0 2024-12-12T16:27:17,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741955_1131 (size=12301) 2024-12-12T16:27:17,511 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/cfa98ce2ea7c4753b9211b0845a9c9c3 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/cfa98ce2ea7c4753b9211b0845a9c9c3 2024-12-12T16:27:17,511 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/f87c0c2e3d874091893efd7ed37d8753 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/f87c0c2e3d874091893efd7ed37d8753 2024-12-12T16:27:17,517 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b22602467dd4e6c94f26649b7855f8e8/A of b22602467dd4e6c94f26649b7855f8e8 into cfa98ce2ea7c4753b9211b0845a9c9c3(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:27:17,517 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b22602467dd4e6c94f26649b7855f8e8/B of b22602467dd4e6c94f26649b7855f8e8 into f87c0c2e3d874091893efd7ed37d8753(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
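
Two things worth reading out of the stretch above: the PressureAwareThroughputController lines report compactions being throttled against a "total limit is 50.00 MB/second", and the "Committing ….tmp/… as …" lines are the compacted file being moved from the region's .tmp directory into the store directory. A hedged sketch of raising the throttle bounds on a Configuration follows; the property names are the ones documented for the pressure-aware controller as I recall them and should be verified against the running HBase version, and the byte values are arbitrary examples.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThrottleSketch {
      public static Configuration withHigherThrottle() {
        Configuration conf = HBaseConfiguration.create();
        // Pressure-aware compaction throughput bounds (verify property names for your version).
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        return conf;
      }
    }
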
2024-12-12T16:27:17,517 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:27:17,517 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8., storeName=b22602467dd4e6c94f26649b7855f8e8/A, priority=12, startTime=1734020837086; duration=0sec 2024-12-12T16:27:17,517 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:27:17,517 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8., storeName=b22602467dd4e6c94f26649b7855f8e8/B, priority=12, startTime=1734020837086; duration=0sec 2024-12-12T16:27:17,517 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:27:17,517 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b22602467dd4e6c94f26649b7855f8e8:A 2024-12-12T16:27:17,517 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:17,517 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T16:27:17,517 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b22602467dd4e6c94f26649b7855f8e8:B 2024-12-12T16:27:17,519 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50362 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T16:27:17,519 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): b22602467dd4e6c94f26649b7855f8e8/C is initiating minor compaction (all files) 2024-12-12T16:27:17,519 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b22602467dd4e6c94f26649b7855f8e8/C in TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 
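
As each family finishes, the CompactionRunner logs the completion, clears the under-compaction mark, and the next store (C here) is selected. From a client or test harness the same progress can be requested and observed through the Admin API; a minimal sketch is below, with the table name taken from the log and the polling loop an assumption about how one might wait for the region servers to report idle.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CompactionStateSketch {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.compact(table);                             // request a (minor) compaction
          while (admin.getCompactionState(table) != CompactionState.NONE) {
            Thread.sleep(500);                              // poll until no compaction is reported
          }
        }
      }
    }
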
2024-12-12T16:27:17,519 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/0436f2bbc74b4645886b9d75dc20351a, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/ef5e2e2f23044c64adc3ed2d532f577b, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/bb5543e60c6f455ab71c699a30970f23, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/6a8092cf167946c0bd0079ed098b2d36] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp, totalSize=49.2 K 2024-12-12T16:27:17,519 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0436f2bbc74b4645886b9d75dc20351a, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=489, earliestPutTs=1734020833297 2024-12-12T16:27:17,520 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting ef5e2e2f23044c64adc3ed2d532f577b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=516, earliestPutTs=1734020833471 2024-12-12T16:27:17,521 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting bb5543e60c6f455ab71c699a30970f23, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=528, earliestPutTs=1734020834121 2024-12-12T16:27:17,521 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6a8092cf167946c0bd0079ed098b2d36, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=553, earliestPutTs=1734020836289 2024-12-12T16:27:17,531 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b22602467dd4e6c94f26649b7855f8e8#C#compaction#117 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:27:17,532 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/5a46a2111a3847a585c16f91ad799fc0 is 50, key is test_row_0/C:col10/1734020836295/Put/seqid=0 2024-12-12T16:27:17,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741956_1132 (size=13595) 2024-12-12T16:27:17,672 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=565 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/2d595d55db394cd6a224a95c4a484a90 2024-12-12T16:27:17,680 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/65d65ba5ab654db387d702f4bf50cc9d is 50, key is test_row_0/B:col10/1734020836637/Put/seqid=0 2024-12-12T16:27:17,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741957_1133 (size=12301) 2024-12-12T16:27:17,807 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-12T16:27:17,941 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/5a46a2111a3847a585c16f91ad799fc0 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/5a46a2111a3847a585c16f91ad799fc0 2024-12-12T16:27:17,946 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b22602467dd4e6c94f26649b7855f8e8/C of b22602467dd4e6c94f26649b7855f8e8 into 5a46a2111a3847a585c16f91ad799fc0(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
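
Throughout this run every flush and compaction is done per column family, and the region consistently reports "3/3 column families" (A, B and C). For orientation only, a table with that shape can be declared with the descriptor builders as sketched below; this is not the exact descriptor the test tool builds, which may set further attributes such as the in-memory compaction policy implied by the CompactingMemStore lines above.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class TestTableLayoutSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Three column families, matching the A/B/C stores seen in the log.
          TableDescriptorBuilder b =
              TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
          for (String family : new String[] {"A", "B", "C"}) {
            b.setColumnFamily(ColumnFamilyDescriptorBuilder.of(family));
          }
          admin.createTable(b.build());
        }
      }
    }
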
2024-12-12T16:27:17,947 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:27:17,947 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8., storeName=b22602467dd4e6c94f26649b7855f8e8/C, priority=12, startTime=1734020837086; duration=0sec 2024-12-12T16:27:17,947 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:17,947 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b22602467dd4e6c94f26649b7855f8e8:C 2024-12-12T16:27:18,084 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=565 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/65d65ba5ab654db387d702f4bf50cc9d 2024-12-12T16:27:18,092 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/0605153573584777a77463eaa440c11a is 50, key is test_row_0/C:col10/1734020836637/Put/seqid=0 2024-12-12T16:27:18,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741958_1134 (size=12301) 2024-12-12T16:27:18,497 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=565 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/0605153573584777a77463eaa440c11a 2024-12-12T16:27:18,502 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/2d595d55db394cd6a224a95c4a484a90 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/2d595d55db394cd6a224a95c4a484a90 2024-12-12T16:27:18,506 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/2d595d55db394cd6a224a95c4a484a90, entries=150, sequenceid=565, filesize=12.0 K 2024-12-12T16:27:18,507 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/65d65ba5ab654db387d702f4bf50cc9d as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/65d65ba5ab654db387d702f4bf50cc9d 2024-12-12T16:27:18,511 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/65d65ba5ab654db387d702f4bf50cc9d, entries=150, sequenceid=565, filesize=12.0 K 2024-12-12T16:27:18,512 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/0605153573584777a77463eaa440c11a as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/0605153573584777a77463eaa440c11a 2024-12-12T16:27:18,515 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/0605153573584777a77463eaa440c11a, entries=150, sequenceid=565, filesize=12.0 K 2024-12-12T16:27:18,516 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=0 B/0 for b22602467dd4e6c94f26649b7855f8e8 in 1255ms, sequenceid=565, compaction requested=false 2024-12-12T16:27:18,516 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:27:22,065 DEBUG [Thread-155 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6bbb5d8a to 127.0.0.1:52684 2024-12-12T16:27:22,065 DEBUG [Thread-155 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:27:22,101 DEBUG [Thread-151 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x72b32f98 to 127.0.0.1:52684 2024-12-12T16:27:22,101 DEBUG [Thread-151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:27:22,146 DEBUG [Thread-157 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x18603bb9 to 127.0.0.1:52684 2024-12-12T16:27:22,146 DEBUG [Thread-157 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:27:24,946 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-12T16:27:24,948 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51670, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-12T16:27:26,463 DEBUG [Thread-153 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x04977266 to 127.0.0.1:52684 2024-12-12T16:27:26,463 DEBUG [Thread-153 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:27:26,464 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-12T16:27:26,464 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 253 2024-12-12T16:27:26,464 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 61 2024-12-12T16:27:26,464 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 18 2024-12-12T16:27:26,464 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 62 2024-12-12T16:27:26,464 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 57 2024-12-12T16:27:26,464 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-12T16:27:26,464 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6080 2024-12-12T16:27:26,464 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5987 2024-12-12T16:27:26,464 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-12T16:27:26,464 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2665 2024-12-12T16:27:26,464 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7995 rows 2024-12-12T16:27:26,464 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2685 2024-12-12T16:27:26,464 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8054 rows 2024-12-12T16:27:26,464 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-12T16:27:26,464 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0e98ea32 to 127.0.0.1:52684 2024-12-12T16:27:26,464 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:27:26,468 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-12T16:27:26,472 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-12T16:27:26,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=32, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-12T16:27:26,482 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734020846481"}]},"ts":"1734020846481"} 2024-12-12T16:27:26,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-12T16:27:26,483 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-12T16:27:26,485 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-12T16:27:26,487 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-12T16:27:26,492 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=34, ppid=33, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=b22602467dd4e6c94f26649b7855f8e8, UNASSIGN}] 2024-12-12T16:27:26,493 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=34, ppid=33, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure 
table=TestAcidGuarantees, region=b22602467dd4e6c94f26649b7855f8e8, UNASSIGN 2024-12-12T16:27:26,494 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=34 updating hbase:meta row=b22602467dd4e6c94f26649b7855f8e8, regionState=CLOSING, regionLocation=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:26,495 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T16:27:26,495 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=35, ppid=34, state=RUNNABLE; CloseRegionProcedure b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476}] 2024-12-12T16:27:26,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-12T16:27:26,650 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:26,652 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(124): Close b22602467dd4e6c94f26649b7855f8e8 2024-12-12T16:27:26,652 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T16:27:26,653 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1681): Closing b22602467dd4e6c94f26649b7855f8e8, disabling compactions & flushes 2024-12-12T16:27:26,653 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:27:26,653 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:27:26,653 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. after waiting 0 ms 2024-12-12T16:27:26,653 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 
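
The DisableTableProcedure / CloseTableRegionsProcedure / TransitRegionStateProcedure / CloseRegionProcedure chain above (pids 32 through 35) is what the master runs once the client asks to disable the test table. From the client side that whole chain starts with a single Admin call; a minimal teardown sketch is below. The log only shows the disable, so the existence check and the deleteTable call are assumptions about a full cleanup, not something captured here.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class TeardownSketch {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          if (admin.tableExists(table)) {
            if (admin.isTableEnabled(table)) {
              admin.disableTable(table);   // drives the DisableTableProcedure seen in the log
            }
            admin.deleteTable(table);      // only valid once the table is disabled
          }
        }
      }
    }
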
2024-12-12T16:27:26,653 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(2837): Flushing b22602467dd4e6c94f26649b7855f8e8 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-12-12T16:27:26,653 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=A 2024-12-12T16:27:26,653 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:26,653 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=B 2024-12-12T16:27:26,653 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:26,653 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b22602467dd4e6c94f26649b7855f8e8, store=C 2024-12-12T16:27:26,653 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:26,658 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/b4dd61a400d547b3ba45608279d1c5f7 is 50, key is test_row_0/A:col10/1734020846462/Put/seqid=0 2024-12-12T16:27:26,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741959_1135 (size=9857) 2024-12-12T16:27:26,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-12T16:27:27,063 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=575 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/b4dd61a400d547b3ba45608279d1c5f7 2024-12-12T16:27:27,071 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/5420bbcbf6c24fccb2dfe57c4c9bc6a9 is 50, key is test_row_0/B:col10/1734020846462/Put/seqid=0 2024-12-12T16:27:27,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741960_1136 (size=9857) 2024-12-12T16:27:27,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-12T16:27:27,476 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 
{event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=575 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/5420bbcbf6c24fccb2dfe57c4c9bc6a9 2024-12-12T16:27:27,484 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/6944127f063f45768ecd047e4b880c37 is 50, key is test_row_0/C:col10/1734020846462/Put/seqid=0 2024-12-12T16:27:27,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741961_1137 (size=9857) 2024-12-12T16:27:27,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-12T16:27:27,888 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=575 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/6944127f063f45768ecd047e4b880c37 2024-12-12T16:27:27,894 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/A/b4dd61a400d547b3ba45608279d1c5f7 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/b4dd61a400d547b3ba45608279d1c5f7 2024-12-12T16:27:27,898 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/b4dd61a400d547b3ba45608279d1c5f7, entries=100, sequenceid=575, filesize=9.6 K 2024-12-12T16:27:27,899 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/B/5420bbcbf6c24fccb2dfe57c4c9bc6a9 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/5420bbcbf6c24fccb2dfe57c4c9bc6a9 2024-12-12T16:27:27,903 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/5420bbcbf6c24fccb2dfe57c4c9bc6a9, entries=100, sequenceid=575, filesize=9.6 K 2024-12-12T16:27:27,904 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/.tmp/C/6944127f063f45768ecd047e4b880c37 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/6944127f063f45768ecd047e4b880c37 2024-12-12T16:27:27,908 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/6944127f063f45768ecd047e4b880c37, entries=100, sequenceid=575, filesize=9.6 K 2024-12-12T16:27:27,909 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for b22602467dd4e6c94f26649b7855f8e8 in 1256ms, sequenceid=575, compaction requested=true 2024-12-12T16:27:27,909 DEBUG [StoreCloser-TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/46a10f78d1664e318fc3d1dc777d4d3f, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/b82b136edc104698af5aa7b0d6aa6190, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/a19011640cdd421483226e850139bf0d, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/e10aee92b83b4b52b71eca3b65424d7d, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/bbb50af00050449c9a4df8e31e2faeff, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/57679d0e21ac4b0e931b2e23f5561639, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/993dddae5f9e482e8c1f8d0fbb27f98e, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/7a6ace4f69964854a7d8fff97095ae9a, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/2ff4386772c841f68fd142432650dc2b, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/fd5bee516fa9427da605f9f06469e6ba, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/c5f0835582924b2e8ffabb3fadb57481, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/cd920d4432ad4f23b101bdb2b2ea6d46, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/8010a31ddda94e6bae5726db741304d1, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/69aaddf8a3ba47c39393f38c1c77eb3f, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/cde06486c0754b61914693e0c70ec54e, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/cd0b575ee33b4879923196b38cec0a28, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/68a6dc7a24324f10b36623bbc409ecdb, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/ab18ee6b5e514f8ab84feea4f181d0d8, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/a8b6cc2125fc408e96b3c328b2d37d07, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/87086f51df6d4e09b8c125e5df76b423, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/6033680fb6eb44118d50b7844e7cc96f, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/ccb54049c9ff4ef9bd6e2d19cfd9c83e, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/bcecdb165b024f93a7bfcf84c67fb790, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/0e9e0ce1bf83498d9f0adc9bf039e9f5, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/06f6a79deae6488e82cee6d07407818e, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/94cd97022f0e460095e6141e85433ba5, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/354f05f1d2ef40a1a5b3c860660b56f7, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/b8092130ab9e434d9e62764284809d38, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/ea9c4f54519e4ddb96809922df365bfa, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/6e0a4bfb66ed434b89d0104cb12f336e, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/b8c2eba3df9c40acab6aaf5d3a879424, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/bda905ac0fcd4dafbb1e919009168979, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/4ece7494fa49474dbd5cd24655f62df7, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/552c37fa55b343479ffd166a3393bdf1, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/56cc04b39c0e42e180d7e634becc7a70, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/99f217206aac4acab843257267ec59d2, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/85a7ae74e3cc494f922cec7afa048657, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/26e5133e4cf24ebdac8b8a8c21e01cb9] to archive 2024-12-12T16:27:27,912 DEBUG [StoreCloser-TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-12T16:27:27,920 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/46a10f78d1664e318fc3d1dc777d4d3f to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/46a10f78d1664e318fc3d1dc777d4d3f 2024-12-12T16:27:27,921 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/7a6ace4f69964854a7d8fff97095ae9a to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/7a6ace4f69964854a7d8fff97095ae9a 2024-12-12T16:27:27,921 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/e10aee92b83b4b52b71eca3b65424d7d to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/e10aee92b83b4b52b71eca3b65424d7d 2024-12-12T16:27:27,921 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/993dddae5f9e482e8c1f8d0fbb27f98e to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/993dddae5f9e482e8c1f8d0fbb27f98e 2024-12-12T16:27:27,921 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/a19011640cdd421483226e850139bf0d to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/a19011640cdd421483226e850139bf0d 2024-12-12T16:27:27,921 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/bbb50af00050449c9a4df8e31e2faeff to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/bbb50af00050449c9a4df8e31e2faeff 2024-12-12T16:27:27,922 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/57679d0e21ac4b0e931b2e23f5561639 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/57679d0e21ac4b0e931b2e23f5561639 2024-12-12T16:27:27,922 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/b82b136edc104698af5aa7b0d6aa6190 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/b82b136edc104698af5aa7b0d6aa6190 2024-12-12T16:27:27,925 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/2ff4386772c841f68fd142432650dc2b to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/2ff4386772c841f68fd142432650dc2b 2024-12-12T16:27:27,925 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/fd5bee516fa9427da605f9f06469e6ba to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/fd5bee516fa9427da605f9f06469e6ba 2024-12-12T16:27:27,925 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/69aaddf8a3ba47c39393f38c1c77eb3f to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/69aaddf8a3ba47c39393f38c1c77eb3f 2024-12-12T16:27:27,925 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/cd920d4432ad4f23b101bdb2b2ea6d46 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/cd920d4432ad4f23b101bdb2b2ea6d46 2024-12-12T16:27:27,925 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/c5f0835582924b2e8ffabb3fadb57481 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/c5f0835582924b2e8ffabb3fadb57481 2024-12-12T16:27:27,925 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/8010a31ddda94e6bae5726db741304d1 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/8010a31ddda94e6bae5726db741304d1 2024-12-12T16:27:27,925 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/cde06486c0754b61914693e0c70ec54e to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/cde06486c0754b61914693e0c70ec54e 2024-12-12T16:27:27,926 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/cd0b575ee33b4879923196b38cec0a28 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/cd0b575ee33b4879923196b38cec0a28 2024-12-12T16:27:27,927 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/68a6dc7a24324f10b36623bbc409ecdb to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/68a6dc7a24324f10b36623bbc409ecdb 2024-12-12T16:27:27,927 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/a8b6cc2125fc408e96b3c328b2d37d07 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/a8b6cc2125fc408e96b3c328b2d37d07 2024-12-12T16:27:27,927 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/ccb54049c9ff4ef9bd6e2d19cfd9c83e to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/ccb54049c9ff4ef9bd6e2d19cfd9c83e 2024-12-12T16:27:27,928 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/ab18ee6b5e514f8ab84feea4f181d0d8 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/ab18ee6b5e514f8ab84feea4f181d0d8 2024-12-12T16:27:27,928 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/87086f51df6d4e09b8c125e5df76b423 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/87086f51df6d4e09b8c125e5df76b423 2024-12-12T16:27:27,928 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/0e9e0ce1bf83498d9f0adc9bf039e9f5 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/0e9e0ce1bf83498d9f0adc9bf039e9f5 2024-12-12T16:27:27,929 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/6033680fb6eb44118d50b7844e7cc96f to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/6033680fb6eb44118d50b7844e7cc96f 2024-12-12T16:27:27,929 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/bcecdb165b024f93a7bfcf84c67fb790 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/bcecdb165b024f93a7bfcf84c67fb790 2024-12-12T16:27:27,929 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/06f6a79deae6488e82cee6d07407818e to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/06f6a79deae6488e82cee6d07407818e 2024-12-12T16:27:27,930 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/354f05f1d2ef40a1a5b3c860660b56f7 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/354f05f1d2ef40a1a5b3c860660b56f7 2024-12-12T16:27:27,930 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/6e0a4bfb66ed434b89d0104cb12f336e to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/6e0a4bfb66ed434b89d0104cb12f336e 2024-12-12T16:27:27,930 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/ea9c4f54519e4ddb96809922df365bfa to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/ea9c4f54519e4ddb96809922df365bfa 2024-12-12T16:27:27,931 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/b8092130ab9e434d9e62764284809d38 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/b8092130ab9e434d9e62764284809d38 2024-12-12T16:27:27,931 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/94cd97022f0e460095e6141e85433ba5 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/94cd97022f0e460095e6141e85433ba5 2024-12-12T16:27:27,931 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/b8c2eba3df9c40acab6aaf5d3a879424 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/b8c2eba3df9c40acab6aaf5d3a879424 2024-12-12T16:27:27,931 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/bda905ac0fcd4dafbb1e919009168979 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/bda905ac0fcd4dafbb1e919009168979 2024-12-12T16:27:27,932 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/4ece7494fa49474dbd5cd24655f62df7 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/4ece7494fa49474dbd5cd24655f62df7 2024-12-12T16:27:27,933 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/99f217206aac4acab843257267ec59d2 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/99f217206aac4acab843257267ec59d2 2024-12-12T16:27:27,933 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/552c37fa55b343479ffd166a3393bdf1 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/552c37fa55b343479ffd166a3393bdf1 2024-12-12T16:27:27,933 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/85a7ae74e3cc494f922cec7afa048657 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/85a7ae74e3cc494f922cec7afa048657 2024-12-12T16:27:27,933 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/56cc04b39c0e42e180d7e634becc7a70 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/56cc04b39c0e42e180d7e634becc7a70 2024-12-12T16:27:27,934 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/26e5133e4cf24ebdac8b8a8c21e01cb9 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/26e5133e4cf24ebdac8b8a8c21e01cb9 2024-12-12T16:27:27,952 DEBUG [StoreCloser-TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/8e746b81d1b040848e4459870142b3e0, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/67bcb6293db3458eaea3f54723250b22, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/5ee6c18f9a1f4386ba4a5f825c4c0601, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/8e56483de8ad4bc782f349707117331a, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/d7001e1b241c4e6d9f0f8e1be0cbc0d5, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/f926d053f316444a981b217bf788bb97, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/06e56604a74942febc1a3fc9be306cc1, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/9ff6664c67854f3eb5b42081bf76263f, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/c4058445651f4343bd8465ba3e2fe75a, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/9cb383709d3745daa6e16060ff3d70db, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/1755ee0659d74535989af2a323b8040e, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/245c9293a1a24a8fb9ccb74f5897fad7, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/6371696bf6e446d69448f2baa1dd93f8, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/abdcc222e8664aa9aece339911e2dfa9, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/2b9588832f3c4d54a3e6dd9ac7c41e92, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/84771d45202844c4b3356f4ec2e58959, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/bce9270aa6e64f7a834a416978725dc9, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/48bac41b55a7436d9e2e36dbc8ff1ba6, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/71cd824f849d49e0bb4754520294b369, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/25b9f1aafc6d4dedb46f2773c50d9366, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/3f2018d854f84f6ba5211e756b71f572, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/ff8365cce99a4c31b33f7eb69e72ac7b, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/755176d4a0bd45a1b2636eb951e83d9e, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/f6a1ace9cd8346f6ac4ad5b6bd077939, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/1a33477c16e44991b6552d46ff749ccf, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/b03a96bf12a54e0f840df8330fe2d5bf, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/1e94d32aee964f00a5f9274fd927c1ae, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/c318490d103c4ce3a8c2b2e09c999b46, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/f17fd6b2921d4ad3ad8815dc4792a40f, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/66a3416074824bbea7de508cb82abf43, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/9473338fa4fc42e58616e16b253c9c0b, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/7cbe4cee90b044a3b8f28af3b7ac0b9a, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/a1f3a29477e142f09bcb37f7678064ff, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/ea0b1eb6928a4769a7e01a4291ee8d14, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/7ebfa3b8601d48bb9e6b775b44f8dfb5, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/870f898b757e4ceb832b7482c7987fa8, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/55adae08c1bd4aa59514d0a2422fd4c8, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/c9b56d1c32bb4c24876e58d57600c07e] to archive 2024-12-12T16:27:27,954 DEBUG [StoreCloser-TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-12T16:27:27,957 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/d7001e1b241c4e6d9f0f8e1be0cbc0d5 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/d7001e1b241c4e6d9f0f8e1be0cbc0d5 2024-12-12T16:27:27,957 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/67bcb6293db3458eaea3f54723250b22 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/67bcb6293db3458eaea3f54723250b22 2024-12-12T16:27:27,958 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/8e56483de8ad4bc782f349707117331a to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/8e56483de8ad4bc782f349707117331a 2024-12-12T16:27:27,958 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/9ff6664c67854f3eb5b42081bf76263f to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/9ff6664c67854f3eb5b42081bf76263f 2024-12-12T16:27:27,958 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/5ee6c18f9a1f4386ba4a5f825c4c0601 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/5ee6c18f9a1f4386ba4a5f825c4c0601 2024-12-12T16:27:27,958 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/8e746b81d1b040848e4459870142b3e0 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/8e746b81d1b040848e4459870142b3e0 2024-12-12T16:27:27,958 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/f926d053f316444a981b217bf788bb97 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/f926d053f316444a981b217bf788bb97 2024-12-12T16:27:27,958 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/06e56604a74942febc1a3fc9be306cc1 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/06e56604a74942febc1a3fc9be306cc1 2024-12-12T16:27:27,960 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/9cb383709d3745daa6e16060ff3d70db to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/9cb383709d3745daa6e16060ff3d70db 2024-12-12T16:27:27,961 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/abdcc222e8664aa9aece339911e2dfa9 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/abdcc222e8664aa9aece339911e2dfa9 2024-12-12T16:27:27,961 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/1755ee0659d74535989af2a323b8040e to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/1755ee0659d74535989af2a323b8040e 2024-12-12T16:27:27,961 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/6371696bf6e446d69448f2baa1dd93f8 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/6371696bf6e446d69448f2baa1dd93f8 2024-12-12T16:27:27,961 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/c4058445651f4343bd8465ba3e2fe75a to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/c4058445651f4343bd8465ba3e2fe75a 2024-12-12T16:27:27,962 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/245c9293a1a24a8fb9ccb74f5897fad7 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/245c9293a1a24a8fb9ccb74f5897fad7 2024-12-12T16:27:27,962 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/2b9588832f3c4d54a3e6dd9ac7c41e92 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/2b9588832f3c4d54a3e6dd9ac7c41e92 2024-12-12T16:27:27,962 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/84771d45202844c4b3356f4ec2e58959 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/84771d45202844c4b3356f4ec2e58959 2024-12-12T16:27:27,964 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/3f2018d854f84f6ba5211e756b71f572 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/3f2018d854f84f6ba5211e756b71f572 2024-12-12T16:27:27,964 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/bce9270aa6e64f7a834a416978725dc9 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/bce9270aa6e64f7a834a416978725dc9 2024-12-12T16:27:27,964 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/71cd824f849d49e0bb4754520294b369 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/71cd824f849d49e0bb4754520294b369 2024-12-12T16:27:27,965 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/ff8365cce99a4c31b33f7eb69e72ac7b to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/ff8365cce99a4c31b33f7eb69e72ac7b 2024-12-12T16:27:27,965 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/48bac41b55a7436d9e2e36dbc8ff1ba6 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/48bac41b55a7436d9e2e36dbc8ff1ba6 2024-12-12T16:27:27,965 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/25b9f1aafc6d4dedb46f2773c50d9366 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/25b9f1aafc6d4dedb46f2773c50d9366 2024-12-12T16:27:27,965 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/755176d4a0bd45a1b2636eb951e83d9e to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/755176d4a0bd45a1b2636eb951e83d9e 2024-12-12T16:27:27,966 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/f6a1ace9cd8346f6ac4ad5b6bd077939 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/f6a1ace9cd8346f6ac4ad5b6bd077939 2024-12-12T16:27:27,967 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/b03a96bf12a54e0f840df8330fe2d5bf to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/b03a96bf12a54e0f840df8330fe2d5bf 2024-12-12T16:27:27,968 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/1a33477c16e44991b6552d46ff749ccf to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/1a33477c16e44991b6552d46ff749ccf 2024-12-12T16:27:27,968 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/1e94d32aee964f00a5f9274fd927c1ae to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/1e94d32aee964f00a5f9274fd927c1ae 2024-12-12T16:27:27,968 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/f17fd6b2921d4ad3ad8815dc4792a40f to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/f17fd6b2921d4ad3ad8815dc4792a40f 2024-12-12T16:27:27,968 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/c318490d103c4ce3a8c2b2e09c999b46 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/c318490d103c4ce3a8c2b2e09c999b46 2024-12-12T16:27:27,969 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/7cbe4cee90b044a3b8f28af3b7ac0b9a to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/7cbe4cee90b044a3b8f28af3b7ac0b9a 2024-12-12T16:27:27,969 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/9473338fa4fc42e58616e16b253c9c0b to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/9473338fa4fc42e58616e16b253c9c0b 2024-12-12T16:27:27,970 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/66a3416074824bbea7de508cb82abf43 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/66a3416074824bbea7de508cb82abf43 2024-12-12T16:27:27,971 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/a1f3a29477e142f09bcb37f7678064ff to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/a1f3a29477e142f09bcb37f7678064ff 2024-12-12T16:27:27,971 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/ea0b1eb6928a4769a7e01a4291ee8d14 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/ea0b1eb6928a4769a7e01a4291ee8d14 2024-12-12T16:27:27,971 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/55adae08c1bd4aa59514d0a2422fd4c8 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/55adae08c1bd4aa59514d0a2422fd4c8 2024-12-12T16:27:27,971 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/7ebfa3b8601d48bb9e6b775b44f8dfb5 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/7ebfa3b8601d48bb9e6b775b44f8dfb5 2024-12-12T16:27:27,972 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/870f898b757e4ceb832b7482c7987fa8 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/870f898b757e4ceb832b7482c7987fa8 2024-12-12T16:27:27,972 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/c9b56d1c32bb4c24876e58d57600c07e to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/c9b56d1c32bb4c24876e58d57600c07e 2024-12-12T16:27:27,974 DEBUG [StoreCloser-TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/6a486ea3422547ca8d6c8260be212a9b, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/435c865d72694a919527bd35b258b733, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/1981242d39b148a991f60ce3cfce17d5, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/48cc60ae7c854f9fadbb095d4b7f32ee, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/17ba32f110e94ca8b69014f889d1099a, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/223a365727714d4295db6e6d53bb584f, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/d597f6ce6bd644dfaf71cefaab7d118c, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/88daa93c782c4000b40f7f01d9921bae, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/8093f08c9e374f2499dae4f16bf15166, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/f88203b02c8041a9ba6668013b667da1, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/7fe9d4d6a93248f1a38b7fde90c32183, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/985344ea826143768369a31f786e205b, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/efd99a9c6c2042ebbe4ee4ef07857423, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/d7b1d8a5e8b2419bb22b4e0e19dff7ab, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/171a72c4f6d04ed8a4d0da5dd1c5f2d0, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/4389437e8e1d4fb187372f3582852ed9, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/73b80e4e0bdb49359acb79613f23a904, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/19dd418d05d64b99aa556954f8cc7147, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/822f4eb2675d4721b5ee245f9394ee74, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/74ee1764a6b242cabe98ee2f9f6d43e7, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/12ba351cab2f4ed8b80100f982669678, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/caa336c852854041b2fa0072fab5a582, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/d95b33cf3ec7454ab27703aa5acb49ff, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/22836a4a1b9746618b04bd66deddcef4, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/5873d095ac1f4462beff1d52a498e8b3, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/890372f140c846c28453984931d7d3de, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/00a68d4586d34992ac400d502131ba84, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/f220cfb931344de5bed4dd64c710c7e0, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/f8508671e91640dea2245a5f43481eba, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/3d85892f00174e48a8d8cae8d491820b, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/6cde229122c54aa7a0a62e3715f4562c, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/5dc5e53e99594381879b9698793ed29e, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/e2e0a8ed8cfb48d3a04261599070d0fd, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/0436f2bbc74b4645886b9d75dc20351a, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/cd7492eb1a2d4228835c3ff0db1491cd, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/ef5e2e2f23044c64adc3ed2d532f577b, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/bb5543e60c6f455ab71c699a30970f23, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/6a8092cf167946c0bd0079ed098b2d36] to archive 2024-12-12T16:27:27,976 DEBUG [StoreCloser-TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-12T16:27:27,978 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/1981242d39b148a991f60ce3cfce17d5 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/1981242d39b148a991f60ce3cfce17d5 2024-12-12T16:27:27,979 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/48cc60ae7c854f9fadbb095d4b7f32ee to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/48cc60ae7c854f9fadbb095d4b7f32ee 2024-12-12T16:27:27,979 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/223a365727714d4295db6e6d53bb584f to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/223a365727714d4295db6e6d53bb584f 2024-12-12T16:27:27,979 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/d597f6ce6bd644dfaf71cefaab7d118c to 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/d597f6ce6bd644dfaf71cefaab7d118c 2024-12-12T16:27:27,979 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/6a486ea3422547ca8d6c8260be212a9b to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/6a486ea3422547ca8d6c8260be212a9b 2024-12-12T16:27:27,980 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/88daa93c782c4000b40f7f01d9921bae to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/88daa93c782c4000b40f7f01d9921bae 2024-12-12T16:27:27,980 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/435c865d72694a919527bd35b258b733 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/435c865d72694a919527bd35b258b733 2024-12-12T16:27:27,980 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/17ba32f110e94ca8b69014f889d1099a to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/17ba32f110e94ca8b69014f889d1099a 2024-12-12T16:27:27,982 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/985344ea826143768369a31f786e205b to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/985344ea826143768369a31f786e205b 2024-12-12T16:27:27,982 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/8093f08c9e374f2499dae4f16bf15166 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/8093f08c9e374f2499dae4f16bf15166 2024-12-12T16:27:27,982 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/7fe9d4d6a93248f1a38b7fde90c32183 to 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/7fe9d4d6a93248f1a38b7fde90c32183 2024-12-12T16:27:27,982 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/f88203b02c8041a9ba6668013b667da1 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/f88203b02c8041a9ba6668013b667da1 2024-12-12T16:27:27,982 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/efd99a9c6c2042ebbe4ee4ef07857423 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/efd99a9c6c2042ebbe4ee4ef07857423 2024-12-12T16:27:27,983 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/d7b1d8a5e8b2419bb22b4e0e19dff7ab to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/d7b1d8a5e8b2419bb22b4e0e19dff7ab 2024-12-12T16:27:27,983 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/4389437e8e1d4fb187372f3582852ed9 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/4389437e8e1d4fb187372f3582852ed9 2024-12-12T16:27:27,984 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/171a72c4f6d04ed8a4d0da5dd1c5f2d0 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/171a72c4f6d04ed8a4d0da5dd1c5f2d0 2024-12-12T16:27:27,984 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/73b80e4e0bdb49359acb79613f23a904 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/73b80e4e0bdb49359acb79613f23a904 2024-12-12T16:27:27,984 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/822f4eb2675d4721b5ee245f9394ee74 to 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/822f4eb2675d4721b5ee245f9394ee74 2024-12-12T16:27:27,985 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/19dd418d05d64b99aa556954f8cc7147 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/19dd418d05d64b99aa556954f8cc7147 2024-12-12T16:27:27,986 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/12ba351cab2f4ed8b80100f982669678 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/12ba351cab2f4ed8b80100f982669678 2024-12-12T16:27:27,986 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/74ee1764a6b242cabe98ee2f9f6d43e7 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/74ee1764a6b242cabe98ee2f9f6d43e7 2024-12-12T16:27:27,987 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/caa336c852854041b2fa0072fab5a582 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/caa336c852854041b2fa0072fab5a582 2024-12-12T16:27:27,987 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/d95b33cf3ec7454ab27703aa5acb49ff to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/d95b33cf3ec7454ab27703aa5acb49ff 2024-12-12T16:27:27,988 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/5873d095ac1f4462beff1d52a498e8b3 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/5873d095ac1f4462beff1d52a498e8b3 2024-12-12T16:27:27,988 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/00a68d4586d34992ac400d502131ba84 to 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/00a68d4586d34992ac400d502131ba84 2024-12-12T16:27:27,988 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/22836a4a1b9746618b04bd66deddcef4 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/22836a4a1b9746618b04bd66deddcef4 2024-12-12T16:27:27,988 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/890372f140c846c28453984931d7d3de to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/890372f140c846c28453984931d7d3de 2024-12-12T16:27:27,989 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/3d85892f00174e48a8d8cae8d491820b to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/3d85892f00174e48a8d8cae8d491820b 2024-12-12T16:27:27,989 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/f220cfb931344de5bed4dd64c710c7e0 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/f220cfb931344de5bed4dd64c710c7e0 2024-12-12T16:27:27,989 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/6cde229122c54aa7a0a62e3715f4562c to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/6cde229122c54aa7a0a62e3715f4562c 2024-12-12T16:27:27,990 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/f8508671e91640dea2245a5f43481eba to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/f8508671e91640dea2245a5f43481eba 2024-12-12T16:27:27,990 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/5dc5e53e99594381879b9698793ed29e to 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/5dc5e53e99594381879b9698793ed29e 2024-12-12T16:27:27,990 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/e2e0a8ed8cfb48d3a04261599070d0fd to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/e2e0a8ed8cfb48d3a04261599070d0fd 2024-12-12T16:27:27,991 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/cd7492eb1a2d4228835c3ff0db1491cd to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/cd7492eb1a2d4228835c3ff0db1491cd 2024-12-12T16:27:27,991 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/0436f2bbc74b4645886b9d75dc20351a to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/0436f2bbc74b4645886b9d75dc20351a 2024-12-12T16:27:27,992 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/ef5e2e2f23044c64adc3ed2d532f577b to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/ef5e2e2f23044c64adc3ed2d532f577b 2024-12-12T16:27:27,992 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/6a8092cf167946c0bd0079ed098b2d36 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/6a8092cf167946c0bd0079ed098b2d36 2024-12-12T16:27:27,992 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/bb5543e60c6f455ab71c699a30970f23 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/bb5543e60c6f455ab71c699a30970f23 2024-12-12T16:27:27,997 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/recovered.edits/578.seqid, newMaxSeqId=578, maxSeqId=1 2024-12-12T16:27:28,000 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 
{event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8. 2024-12-12T16:27:28,000 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1635): Region close journal for b22602467dd4e6c94f26649b7855f8e8: 2024-12-12T16:27:28,002 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(170): Closed b22602467dd4e6c94f26649b7855f8e8 2024-12-12T16:27:28,002 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=34 updating hbase:meta row=b22602467dd4e6c94f26649b7855f8e8, regionState=CLOSED 2024-12-12T16:27:28,005 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=35, resume processing ppid=34 2024-12-12T16:27:28,005 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, ppid=34, state=SUCCESS; CloseRegionProcedure b22602467dd4e6c94f26649b7855f8e8, server=4f6a4780a2f6,41933,1734020809476 in 1.5080 sec 2024-12-12T16:27:28,006 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=34, resume processing ppid=33 2024-12-12T16:27:28,006 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, ppid=33, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=b22602467dd4e6c94f26649b7855f8e8, UNASSIGN in 1.5130 sec 2024-12-12T16:27:28,009 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=33, resume processing ppid=32 2024-12-12T16:27:28,009 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, ppid=32, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.5200 sec 2024-12-12T16:27:28,010 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734020848010"}]},"ts":"1734020848010"} 2024-12-12T16:27:28,011 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-12T16:27:28,013 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-12T16:27:28,015 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.5400 sec 2024-12-12T16:27:28,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-12T16:27:28,586 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 32 completed 2024-12-12T16:27:28,589 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-12T16:27:28,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=36, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T16:27:28,594 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=36, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T16:27:28,596 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=36, 
state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T16:27:28,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-12-12T16:27:28,599 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8 2024-12-12T16:27:28,603 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A, FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B, FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C, FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/recovered.edits] 2024-12-12T16:27:28,608 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/2d595d55db394cd6a224a95c4a484a90 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/2d595d55db394cd6a224a95c4a484a90 2024-12-12T16:27:28,608 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/cfa98ce2ea7c4753b9211b0845a9c9c3 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/cfa98ce2ea7c4753b9211b0845a9c9c3 2024-12-12T16:27:28,608 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/b4dd61a400d547b3ba45608279d1c5f7 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/A/b4dd61a400d547b3ba45608279d1c5f7 2024-12-12T16:27:28,612 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/65d65ba5ab654db387d702f4bf50cc9d to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/65d65ba5ab654db387d702f4bf50cc9d 2024-12-12T16:27:28,612 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/5420bbcbf6c24fccb2dfe57c4c9bc6a9 to 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/5420bbcbf6c24fccb2dfe57c4c9bc6a9 2024-12-12T16:27:28,612 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/f87c0c2e3d874091893efd7ed37d8753 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/B/f87c0c2e3d874091893efd7ed37d8753 2024-12-12T16:27:28,615 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/5a46a2111a3847a585c16f91ad799fc0 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/5a46a2111a3847a585c16f91ad799fc0 2024-12-12T16:27:28,615 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/0605153573584777a77463eaa440c11a to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/0605153573584777a77463eaa440c11a 2024-12-12T16:27:28,616 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/6944127f063f45768ecd047e4b880c37 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/C/6944127f063f45768ecd047e4b880c37 2024-12-12T16:27:28,619 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/recovered.edits/578.seqid to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8/recovered.edits/578.seqid 2024-12-12T16:27:28,620 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/b22602467dd4e6c94f26649b7855f8e8 2024-12-12T16:27:28,620 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-12T16:27:28,625 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=36, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T16:27:28,630 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-12-12T16:27:28,633 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-12T16:27:28,665 DEBUG 
[PEWorker-4 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-12T16:27:28,666 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=36, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T16:27:28,666 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 2024-12-12T16:27:28,667 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734020848666"}]},"ts":"9223372036854775807"} 2024-12-12T16:27:28,670 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-12T16:27:28,670 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => b22602467dd4e6c94f26649b7855f8e8, NAME => 'TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8.', STARTKEY => '', ENDKEY => ''}] 2024-12-12T16:27:28,670 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 2024-12-12T16:27:28,670 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734020848670"}]},"ts":"9223372036854775807"} 2024-12-12T16:27:28,672 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-12T16:27:28,674 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=36, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T16:27:28,676 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 85 msec 2024-12-12T16:27:28,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-12-12T16:27:28,698 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 36 completed 2024-12-12T16:27:28,709 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=246 (was 219) Potentially hanging thread: HFileArchiver-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x526f2908-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially 
hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: HFileArchiver-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x526f2908-shared-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially 
hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS:0;4f6a4780a2f6:41933-shortCompactions-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.PriorityBlockingQueue.take(PriorityBlockingQueue.java:535) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-84974713_22 at /127.0.0.1:40492 [Waiting for operation #13] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) 
java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x526f2908-shared-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x526f2908-shared-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=449 (was 444) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=318 (was 148) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=7890 (was 8447) 2024-12-12T16:27:28,719 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=246, OpenFileDescriptor=449, MaxFileDescriptor=1048576, SystemLoadAverage=318, ProcessCount=11, AvailableMemoryMB=7890 2024-12-12T16:27:28,721 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-12T16:27:28,722 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T16:27:28,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=37, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-12T16:27:28,724 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=37, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T16:27:28,724 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:28,724 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 37 2024-12-12T16:27:28,725 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=37, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T16:27:28,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-12-12T16:27:28,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741962_1138 (size=963) 2024-12-12T16:27:28,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-12-12T16:27:29,028 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-12-12T16:27:29,134 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc 2024-12-12T16:27:29,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741963_1139 (size=53) 2024-12-12T16:27:29,216 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-12T16:27:29,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-12-12T16:27:29,541 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T16:27:29,541 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 044396069fa7748e35e43f97f084a6ce, disabling compactions & flushes 2024-12-12T16:27:29,541 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:29,541 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:29,541 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. after waiting 0 ms 2024-12-12T16:27:29,541 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 
2024-12-12T16:27:29,541 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:29,541 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 044396069fa7748e35e43f97f084a6ce: 2024-12-12T16:27:29,542 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=37, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T16:27:29,543 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1734020849542"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734020849542"}]},"ts":"1734020849542"} 2024-12-12T16:27:29,544 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-12T16:27:29,545 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=37, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T16:27:29,545 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734020849545"}]},"ts":"1734020849545"} 2024-12-12T16:27:29,546 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-12T16:27:29,550 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=38, ppid=37, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=044396069fa7748e35e43f97f084a6ce, ASSIGN}] 2024-12-12T16:27:29,551 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=38, ppid=37, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=044396069fa7748e35e43f97f084a6ce, ASSIGN 2024-12-12T16:27:29,552 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=38, ppid=37, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=044396069fa7748e35e43f97f084a6ce, ASSIGN; state=OFFLINE, location=4f6a4780a2f6,41933,1734020809476; forceNewPlan=false, retain=false 2024-12-12T16:27:29,702 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=38 updating hbase:meta row=044396069fa7748e35e43f97f084a6ce, regionState=OPENING, regionLocation=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:29,704 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE; OpenRegionProcedure 044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476}] 2024-12-12T16:27:29,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-12-12T16:27:29,856 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:29,860 INFO [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] handler.AssignRegionHandler(135): Open 
TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:29,860 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(7285): Opening region: {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} 2024-12-12T16:27:29,860 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:29,860 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T16:27:29,860 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(7327): checking encryption for 044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:29,860 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(7330): checking classloading for 044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:29,862 INFO [StoreOpener-044396069fa7748e35e43f97f084a6ce-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:29,863 INFO [StoreOpener-044396069fa7748e35e43f97f084a6ce-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T16:27:29,863 INFO [StoreOpener-044396069fa7748e35e43f97f084a6ce-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 044396069fa7748e35e43f97f084a6ce columnFamilyName A 2024-12-12T16:27:29,864 DEBUG [StoreOpener-044396069fa7748e35e43f97f084a6ce-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:29,864 INFO [StoreOpener-044396069fa7748e35e43f97f084a6ce-1 {}] regionserver.HStore(327): Store=044396069fa7748e35e43f97f084a6ce/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T16:27:29,864 INFO [StoreOpener-044396069fa7748e35e43f97f084a6ce-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, 
cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:29,865 INFO [StoreOpener-044396069fa7748e35e43f97f084a6ce-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T16:27:29,866 INFO [StoreOpener-044396069fa7748e35e43f97f084a6ce-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 044396069fa7748e35e43f97f084a6ce columnFamilyName B 2024-12-12T16:27:29,866 DEBUG [StoreOpener-044396069fa7748e35e43f97f084a6ce-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:29,867 INFO [StoreOpener-044396069fa7748e35e43f97f084a6ce-1 {}] regionserver.HStore(327): Store=044396069fa7748e35e43f97f084a6ce/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T16:27:29,867 INFO [StoreOpener-044396069fa7748e35e43f97f084a6ce-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:29,868 INFO [StoreOpener-044396069fa7748e35e43f97f084a6ce-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T16:27:29,868 INFO [StoreOpener-044396069fa7748e35e43f97f084a6ce-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 044396069fa7748e35e43f97f084a6ce columnFamilyName C 2024-12-12T16:27:29,868 DEBUG [StoreOpener-044396069fa7748e35e43f97f084a6ce-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:29,868 INFO [StoreOpener-044396069fa7748e35e43f97f084a6ce-1 {}] regionserver.HStore(327): 
Store=044396069fa7748e35e43f97f084a6ce/C, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T16:27:29,868 INFO [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:29,869 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:29,869 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:29,871 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-12T16:27:29,872 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1085): writing seq id for 044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:29,874 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T16:27:29,874 INFO [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1102): Opened 044396069fa7748e35e43f97f084a6ce; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59903831, jitterRate=-0.10736335813999176}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-12T16:27:29,875 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1001): Region open journal for 044396069fa7748e35e43f97f084a6ce: 2024-12-12T16:27:29,876 INFO [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce., pid=39, masterSystemTime=1734020849855 2024-12-12T16:27:29,877 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:29,878 INFO [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 
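
The store-opener entries above show the new region of TestAcidGuarantees coming up with three column families (A, B and C), each backed by a CompactingMemStore using the ADAPTIVE in-memory compactor and the DefaultStoreFileTracker. For orientation, here is a minimal sketch of the client-side call that drives a CreateTableProcedure like pid=37. It assumes the standard HBase 2.x client API; the class name and the conn/admin variables are illustrative, while the ADAPTIVE memstore attribute and VERSIONS => '1' mirror the descriptor printed later in the log when the table is modified.

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class CreateAcidGuaranteesTable {
      public static void main(String[] args) throws IOException {
        // Connection settings are read from hbase-site.xml on the classpath.
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder builder = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestAcidGuarantees"))
              // TABLE_ATTRIBUTES seen in the log: ADAPTIVE in-memory compaction for the CompactingMemStores.
              .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
          for (String family : new String[] {"A", "B", "C"}) {
            builder.setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes(family))
                .setMaxVersions(1)          // VERSIONS => '1', as in the descriptor logged for this table
                .build());
          }
          TableDescriptor desc = builder.build();
          admin.createTable(desc);          // runs a CreateTableProcedure on the master (pid=37 above)
        }
      }
    }

The synchronous createTable call returns only once the master procedure completes, which is what the repeated "Checking to see if procedure is done pid=37" entries from MasterRpcServices reflect.
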
2024-12-12T16:27:29,878 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=38 updating hbase:meta row=044396069fa7748e35e43f97f084a6ce, regionState=OPEN, openSeqNum=2, regionLocation=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:29,880 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=39, resume processing ppid=38 2024-12-12T16:27:29,880 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, ppid=38, state=SUCCESS; OpenRegionProcedure 044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 in 175 msec 2024-12-12T16:27:29,882 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=38, resume processing ppid=37 2024-12-12T16:27:29,882 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, ppid=37, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=044396069fa7748e35e43f97f084a6ce, ASSIGN in 330 msec 2024-12-12T16:27:29,882 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=37, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T16:27:29,883 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734020849883"}]},"ts":"1734020849883"} 2024-12-12T16:27:29,884 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-12T16:27:29,886 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=37, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T16:27:29,887 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1640 sec 2024-12-12T16:27:30,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-12-12T16:27:30,831 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 37 completed 2024-12-12T16:27:30,832 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6eb305fc to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@245d85d 2024-12-12T16:27:30,836 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2471b8a8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:27:30,838 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:27:30,840 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55176, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:27:30,841 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-12T16:27:30,843 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51674, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-12T16:27:30,848 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-12T16:27:30,849 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T16:27:30,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=40, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-12T16:27:30,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741964_1140 (size=999) 2024-12-12T16:27:31,271 DEBUG [PEWorker-5 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-12T16:27:31,271 INFO [PEWorker-5 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-12T16:27:31,275 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=41, ppid=40, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-12T16:27:31,283 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=044396069fa7748e35e43f97f084a6ce, REOPEN/MOVE}] 2024-12-12T16:27:31,284 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=044396069fa7748e35e43f97f084a6ce, REOPEN/MOVE 2024-12-12T16:27:31,285 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=044396069fa7748e35e43f97f084a6ce, regionState=CLOSING, regionLocation=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:31,286 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T16:27:31,286 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=43, ppid=42, state=RUNNABLE; CloseRegionProcedure 044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476}] 2024-12-12T16:27:31,437 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:31,438 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(124): Close 044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:31,438 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T16:27:31,438 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1681): Closing 044396069fa7748e35e43f97f084a6ce, disabling compactions & flushes 2024-12-12T16:27:31,438 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:31,438 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:31,438 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. after waiting 0 ms 2024-12-12T16:27:31,438 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 
2024-12-12T16:27:31,442 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-12T16:27:31,443 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:31,443 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1635): Region close journal for 044396069fa7748e35e43f97f084a6ce: 2024-12-12T16:27:31,443 WARN [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegionServer(3786): Not adding moved region record: 044396069fa7748e35e43f97f084a6ce to self. 2024-12-12T16:27:31,445 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(170): Closed 044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:31,445 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=044396069fa7748e35e43f97f084a6ce, regionState=CLOSED 2024-12-12T16:27:31,447 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=43, resume processing ppid=42 2024-12-12T16:27:31,448 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, ppid=42, state=SUCCESS; CloseRegionProcedure 044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 in 160 msec 2024-12-12T16:27:31,448 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=044396069fa7748e35e43f97f084a6ce, REOPEN/MOVE; state=CLOSED, location=4f6a4780a2f6,41933,1734020809476; forceNewPlan=false, retain=true 2024-12-12T16:27:31,599 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=044396069fa7748e35e43f97f084a6ce, regionState=OPENING, regionLocation=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:31,600 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=44, ppid=42, state=RUNNABLE; OpenRegionProcedure 044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476}] 2024-12-12T16:27:31,752 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:31,755 INFO [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 
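
The modify-table request logged at 16:27:30,849 changes only column family A, turning it into a MOB-enabled family (IS_MOB => 'true', MOB_THRESHOLD => '4'). Because the descriptor changes, the master follows up with a ReopenTableRegionsProcedure, which is why the region is closed (pid=43) and immediately reopened on the same server in the entries around here. A hedged sketch of the equivalent client-side alteration, again assuming the standard 2.x Admin API and hypothetical surrounding names:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class EnableMobOnFamilyA {
      public static void main(String[] args) throws IOException {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          TableDescriptor current = admin.getDescriptor(table);
          ColumnFamilyDescriptor mobA = ColumnFamilyDescriptorBuilder
              .newBuilder(current.getColumnFamily(Bytes.toBytes("A")))
              .setMobEnabled(true)          // IS_MOB => 'true' in the logged target descriptor
              .setMobThreshold(4L)          // MOB_THRESHOLD => '4'
              .build();
          TableDescriptor modified = TableDescriptorBuilder.newBuilder(current)
              .modifyColumnFamily(mobA)
              .build();
          // On the master this becomes the ModifyTableProcedure / ReopenTableRegionsProcedure chain
          // visible above as pid=40, 41 and 42.
          admin.modifyTable(modified);
        }
      }
    }

A threshold of 4 bytes means almost any realistic value in family A is written as a MOB cell, which matches the mobdir/.tmp file the flush produces further down; it is a test setting, not a production-style one.
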
2024-12-12T16:27:31,756 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7285): Opening region: {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} 2024-12-12T16:27:31,756 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:31,756 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T16:27:31,756 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7327): checking encryption for 044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:31,756 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7330): checking classloading for 044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:31,759 INFO [StoreOpener-044396069fa7748e35e43f97f084a6ce-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:31,759 INFO [StoreOpener-044396069fa7748e35e43f97f084a6ce-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T16:27:31,764 INFO [StoreOpener-044396069fa7748e35e43f97f084a6ce-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 044396069fa7748e35e43f97f084a6ce columnFamilyName A 2024-12-12T16:27:31,766 DEBUG [StoreOpener-044396069fa7748e35e43f97f084a6ce-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:31,766 INFO [StoreOpener-044396069fa7748e35e43f97f084a6ce-1 {}] regionserver.HStore(327): Store=044396069fa7748e35e43f97f084a6ce/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T16:27:31,767 INFO [StoreOpener-044396069fa7748e35e43f97f084a6ce-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:31,768 INFO [StoreOpener-044396069fa7748e35e43f97f084a6ce-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T16:27:31,768 INFO [StoreOpener-044396069fa7748e35e43f97f084a6ce-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 044396069fa7748e35e43f97f084a6ce columnFamilyName B 2024-12-12T16:27:31,768 DEBUG [StoreOpener-044396069fa7748e35e43f97f084a6ce-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:31,768 INFO [StoreOpener-044396069fa7748e35e43f97f084a6ce-1 {}] regionserver.HStore(327): Store=044396069fa7748e35e43f97f084a6ce/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T16:27:31,768 INFO [StoreOpener-044396069fa7748e35e43f97f084a6ce-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:31,769 INFO [StoreOpener-044396069fa7748e35e43f97f084a6ce-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T16:27:31,769 INFO [StoreOpener-044396069fa7748e35e43f97f084a6ce-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 044396069fa7748e35e43f97f084a6ce columnFamilyName C 2024-12-12T16:27:31,769 DEBUG [StoreOpener-044396069fa7748e35e43f97f084a6ce-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:31,770 INFO [StoreOpener-044396069fa7748e35e43f97f084a6ce-1 {}] regionserver.HStore(327): Store=044396069fa7748e35e43f97f084a6ce/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T16:27:31,770 INFO [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:31,771 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:31,772 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:31,773 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-12T16:27:31,775 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1085): writing seq id for 044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:31,776 INFO [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1102): Opened 044396069fa7748e35e43f97f084a6ce; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68623551, jitterRate=0.022570595145225525}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-12T16:27:31,777 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1001): Region open journal for 044396069fa7748e35e43f97f084a6ce: 2024-12-12T16:27:31,778 INFO [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce., pid=44, masterSystemTime=1734020851752 2024-12-12T16:27:31,780 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:31,780 INFO [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 
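
Both times the region opens, FlushLargeStoresPolicy notes that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the TestAcidGuarantees descriptor and falls back to the region memstore flush size divided by the number of families (16.0 MB here). If an explicit per-family lower bound were wanted, it could be supplied as a table-level value; the fragment below is purely illustrative (it reuses the admin and current variables from the sketch above, and the 8 MB figure is an arbitrary example, not something this test does):

    // Illustrative only: pin the per-family flush lower bound instead of relying on the derived default.
    admin.modifyTable(TableDescriptorBuilder.newBuilder(current)
        .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
            String.valueOf(8L * 1024 * 1024))   // 8 MB; the test itself leaves this unset
        .build());
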
2024-12-12T16:27:31,780 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=044396069fa7748e35e43f97f084a6ce, regionState=OPEN, openSeqNum=5, regionLocation=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:31,783 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=44, resume processing ppid=42 2024-12-12T16:27:31,784 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, ppid=42, state=SUCCESS; OpenRegionProcedure 044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 in 182 msec 2024-12-12T16:27:31,785 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=42, resume processing ppid=41 2024-12-12T16:27:31,785 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, ppid=41, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=044396069fa7748e35e43f97f084a6ce, REOPEN/MOVE in 501 msec 2024-12-12T16:27:31,787 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=41, resume processing ppid=40 2024-12-12T16:27:31,787 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, ppid=40, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 512 msec 2024-12-12T16:27:31,792 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 940 msec 2024-12-12T16:27:31,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=40 2024-12-12T16:27:31,799 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x491ea2ee to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5b55744e 2024-12-12T16:27:31,809 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e3a4420, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:27:31,810 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0b44b1e5 to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@454f1431 2024-12-12T16:27:31,813 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@24f64590, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:27:31,815 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x46114993 to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@769942d9 2024-12-12T16:27:31,817 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c5c4716, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:27:31,819 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2885d2d9 to 
127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@cb464a 2024-12-12T16:27:31,822 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22e911df, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:27:31,823 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x152377d4 to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@517ff977 2024-12-12T16:27:31,825 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b727d6e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:27:31,826 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3448d233 to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1c7940d9 2024-12-12T16:27:31,829 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@341384e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:27:31,830 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7a11164b to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2c38ee58 2024-12-12T16:27:31,833 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@26b120d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:27:31,834 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x08a7e1dd to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@630684bf 2024-12-12T16:27:31,836 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c1ec7ee, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:27:31,837 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x31a027db to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@66547e2c 2024-12-12T16:27:31,840 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ccff4bf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:27:31,848 DEBUG 
[hconnection-0x3c95fcd6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:27:31,852 DEBUG [hconnection-0x531be870-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:27:31,852 DEBUG [hconnection-0x1182f757-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:27:31,854 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55178, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:27:31,854 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55180, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:27:31,879 DEBUG [hconnection-0x4c751f2a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:27:31,882 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55190, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:27:31,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:31,883 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 044396069fa7748e35e43f97f084a6ce 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T16:27:31,884 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=A 2024-12-12T16:27:31,884 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:31,884 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=B 2024-12-12T16:27:31,884 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:31,884 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=C 2024-12-12T16:27:31,884 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:31,886 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55194, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:27:31,897 DEBUG [hconnection-0x3b068cd0-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:27:31,898 DEBUG [hconnection-0x2ea9014a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:27:31,899 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55212, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:27:31,899 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55198, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:27:31,904 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T16:27:31,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=45, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees 2024-12-12T16:27:31,905 DEBUG [hconnection-0x10cb5edf-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:27:31,907 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=45, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T16:27:31,907 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55222, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:27:31,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-12T16:27:31,907 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=45, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T16:27:31,907 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T16:27:31,922 DEBUG [hconnection-0x31153c1b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:27:31,923 DEBUG [hconnection-0x3627e90e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:27:31,924 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55226, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:27:31,925 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55242, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:27:31,927 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:31,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020911924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:31,928 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:31,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55198 deadline: 1734020911924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:31,936 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:31,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020911927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:31,936 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212d1e9e73ec65a42f3af9ce6b8e03b42e6_044396069fa7748e35e43f97f084a6ce is 50, key is test_row_0/A:col10/1734020851877/Put/seqid=0 2024-12-12T16:27:31,937 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:31,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020911927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:31,939 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:31,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020911929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:31,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741965_1141 (size=12154) 2024-12-12T16:27:32,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-12T16:27:32,048 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:32,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55198 deadline: 1734020912042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:32,049 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:32,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020912043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:32,050 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:32,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020912043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:32,050 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:32,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020912043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:32,051 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:32,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020912043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:32,060 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:32,060 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-12T16:27:32,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:32,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. as already flushing 2024-12-12T16:27:32,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:32,061 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:27:32,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:32,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:32,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-12T16:27:32,214 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:32,215 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-12T16:27:32,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:32,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. as already flushing 2024-12-12T16:27:32,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:32,215 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:32,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:32,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:32,253 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:32,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55198 deadline: 1734020912251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:32,253 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:32,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020912253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:32,254 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:32,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020912253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:32,255 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:32,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020912254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:32,255 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:32,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020912255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:32,368 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:32,369 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-12T16:27:32,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:32,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. as already flushing 2024-12-12T16:27:32,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:32,369 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:27:32,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:32,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:32,372 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:32,378 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212d1e9e73ec65a42f3af9ce6b8e03b42e6_044396069fa7748e35e43f97f084a6ce to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212d1e9e73ec65a42f3af9ce6b8e03b42e6_044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:32,381 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/b1bb9880adbb4181aa9f2ee1f6dd4cfc, store: [table=TestAcidGuarantees family=A region=044396069fa7748e35e43f97f084a6ce] 2024-12-12T16:27:32,391 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/b1bb9880adbb4181aa9f2ee1f6dd4cfc is 175, key is test_row_0/A:col10/1734020851877/Put/seqid=0 2024-12-12T16:27:32,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741966_1142 (size=30955) 2024-12-12T16:27:32,435 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=15, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/b1bb9880adbb4181aa9f2ee1f6dd4cfc 2024-12-12T16:27:32,466 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/55e53d566c4d4334ab7e4bb03fc43a27 is 50, key is test_row_0/B:col10/1734020851877/Put/seqid=0 2024-12-12T16:27:32,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741967_1143 (size=12001) 2024-12-12T16:27:32,498 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/55e53d566c4d4334ab7e4bb03fc43a27 2024-12-12T16:27:32,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-12T16:27:32,522 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:32,522 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-12T16:27:32,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:32,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. as already flushing 2024-12-12T16:27:32,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:32,523 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:32,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:27:32,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:32,557 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/c170f1eb26df44c39eadc2404bcb7683 is 50, key is test_row_0/C:col10/1734020851877/Put/seqid=0 2024-12-12T16:27:32,557 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:32,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020912556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:32,560 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:32,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55198 deadline: 1734020912557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:32,560 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:32,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020912557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:32,561 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:32,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020912558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:32,561 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:32,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020912559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:32,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741968_1144 (size=12001) 2024-12-12T16:27:32,581 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/c170f1eb26df44c39eadc2404bcb7683 2024-12-12T16:27:32,587 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/b1bb9880adbb4181aa9f2ee1f6dd4cfc as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/b1bb9880adbb4181aa9f2ee1f6dd4cfc 2024-12-12T16:27:32,594 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/b1bb9880adbb4181aa9f2ee1f6dd4cfc, entries=150, sequenceid=15, filesize=30.2 K 2024-12-12T16:27:32,595 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/55e53d566c4d4334ab7e4bb03fc43a27 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/55e53d566c4d4334ab7e4bb03fc43a27 2024-12-12T16:27:32,609 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/55e53d566c4d4334ab7e4bb03fc43a27, entries=150, sequenceid=15, filesize=11.7 K 2024-12-12T16:27:32,611 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/c170f1eb26df44c39eadc2404bcb7683 as 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/c170f1eb26df44c39eadc2404bcb7683 2024-12-12T16:27:32,617 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/c170f1eb26df44c39eadc2404bcb7683, entries=150, sequenceid=15, filesize=11.7 K 2024-12-12T16:27:32,618 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 044396069fa7748e35e43f97f084a6ce in 735ms, sequenceid=15, compaction requested=false 2024-12-12T16:27:32,618 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-12T16:27:32,620 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 044396069fa7748e35e43f97f084a6ce: 2024-12-12T16:27:32,676 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:32,676 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-12T16:27:32,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:32,676 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2837): Flushing 044396069fa7748e35e43f97f084a6ce 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-12T16:27:32,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=A 2024-12-12T16:27:32,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:32,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=B 2024-12-12T16:27:32,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:32,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=C 2024-12-12T16:27:32,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:32,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121297f1978ff01e475aacf61d6776c95174_044396069fa7748e35e43f97f084a6ce is 50, key is test_row_0/A:col10/1734020851925/Put/seqid=0 2024-12-12T16:27:32,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741969_1145 (size=12154) 2024-12-12T16:27:33,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-12T16:27:33,067 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. as already flushing 2024-12-12T16:27:33,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:33,077 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:33,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020913073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:33,077 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:33,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020913073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:33,078 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:33,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55198 deadline: 1734020913074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:33,078 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:33,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020913076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:33,082 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:33,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020913077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:33,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:33,130 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121297f1978ff01e475aacf61d6776c95174_044396069fa7748e35e43f97f084a6ce to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121297f1978ff01e475aacf61d6776c95174_044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:33,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/8ebe1def5afd40b3808246e3975e876b, store: [table=TestAcidGuarantees family=A region=044396069fa7748e35e43f97f084a6ce] 2024-12-12T16:27:33,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/8ebe1def5afd40b3808246e3975e876b is 175, key is test_row_0/A:col10/1734020851925/Put/seqid=0 2024-12-12T16:27:33,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741970_1146 (size=30955) 2024-12-12T16:27:33,161 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=40, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/8ebe1def5afd40b3808246e3975e876b 2024-12-12T16:27:33,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/9dd0b13de7514e59932a6a6d5175a3d5 is 50, key is test_row_0/B:col10/1734020851925/Put/seqid=0 2024-12-12T16:27:33,183 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:33,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020913179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:33,183 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:33,184 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:33,184 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:33,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020913180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:33,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55198 deadline: 1734020913179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:33,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020913179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:33,191 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:33,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020913185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:33,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741971_1147 (size=12001) 2024-12-12T16:27:33,214 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/9dd0b13de7514e59932a6a6d5175a3d5 2024-12-12T16:27:33,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/32f68485c48a45bda8b8786d4c1afd3f is 50, key is test_row_0/C:col10/1734020851925/Put/seqid=0 2024-12-12T16:27:33,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741972_1148 (size=12001) 2024-12-12T16:27:33,253 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/32f68485c48a45bda8b8786d4c1afd3f 2024-12-12T16:27:33,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/8ebe1def5afd40b3808246e3975e876b as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/8ebe1def5afd40b3808246e3975e876b 2024-12-12T16:27:33,271 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/8ebe1def5afd40b3808246e3975e876b, entries=150, sequenceid=40, filesize=30.2 K 2024-12-12T16:27:33,272 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/9dd0b13de7514e59932a6a6d5175a3d5 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/9dd0b13de7514e59932a6a6d5175a3d5 2024-12-12T16:27:33,280 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/9dd0b13de7514e59932a6a6d5175a3d5, entries=150, sequenceid=40, filesize=11.7 K 2024-12-12T16:27:33,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/32f68485c48a45bda8b8786d4c1afd3f as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/32f68485c48a45bda8b8786d4c1afd3f 2024-12-12T16:27:33,288 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/32f68485c48a45bda8b8786d4c1afd3f, entries=150, sequenceid=40, filesize=11.7 K 2024-12-12T16:27:33,294 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 044396069fa7748e35e43f97f084a6ce in 617ms, sequenceid=40, compaction requested=false 2024-12-12T16:27:33,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2538): Flush status journal for 044396069fa7748e35e43f97f084a6ce: 2024-12-12T16:27:33,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 
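Note on the WARN/DEBUG pairs around this flush: HRegion.checkResources() is rejecting Mutate RPCs with RegionTooBusyException while the region's memstore is over its 512.0 K blocking limit (in HBase this limit is the memstore flush size times hbase.hregion.memstore.block.multiplier); once the flush driven by pid=46 drains the memstore, the writes go through again. The following is only a minimal client-side sketch of what such a rejected writer looks like, reusing the table name, family A, row key and column seen in the log; the explicit retry loop, attempt count and backoff values are illustrative assumptions, and the stock HBase client normally retries this exception on its own (per hbase.client.retries.number), possibly surfacing it wrapped in RetriesExhaustedException.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;
import java.io.IOException;

public class TooBusyRetrySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row/family/column mirror the entries in the log (test_row_0, A:col10).
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;                     // illustrative starting backoff, not from the test
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);                       // server may answer with RegionTooBusyException
          break;                                // accepted once the memstore has been flushed
        } catch (RegionTooBusyException e) {
          // Region is over its memstore blocking limit; back off and let the
          // MemStoreFlusher / FlushRegionProcedure catch up, as in the log above.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        } catch (IOException e) {
          // Depending on client retry settings the same condition can arrive wrapped
          // (e.g. inside RetriesExhaustedException); rethrow anything else.
          throw e;
        }
      }
    }
  }
}

In the test run recorded here the pressure is created deliberately; the writers simply keep retrying until the flushes at sequenceid=40 and sequenceid=52 complete, which is the pattern visible in the surrounding entries.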
2024-12-12T16:27:33,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=46 2024-12-12T16:27:33,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4106): Remote procedure done, pid=46 2024-12-12T16:27:33,298 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=46, resume processing ppid=45 2024-12-12T16:27:33,298 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, ppid=45, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3890 sec 2024-12-12T16:27:33,300 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees in 1.3950 sec 2024-12-12T16:27:33,390 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 044396069fa7748e35e43f97f084a6ce 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-12T16:27:33,390 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=A 2024-12-12T16:27:33,390 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:33,390 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=B 2024-12-12T16:27:33,390 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:33,390 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=C 2024-12-12T16:27:33,390 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:33,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:33,403 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412123ef5ec7273834198af2cc7afd0510644_044396069fa7748e35e43f97f084a6ce is 50, key is test_row_0/A:col10/1734020853387/Put/seqid=0 2024-12-12T16:27:33,430 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:33,430 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:33,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020913426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:33,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020913425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:33,430 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:33,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020913428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:33,437 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:33,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55198 deadline: 1734020913431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:33,437 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:33,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020913431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:33,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741973_1149 (size=14594) 2024-12-12T16:27:33,442 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:33,448 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412123ef5ec7273834198af2cc7afd0510644_044396069fa7748e35e43f97f084a6ce to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412123ef5ec7273834198af2cc7afd0510644_044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:33,451 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/2143f7b991ef43e881fd12333e89b911, store: [table=TestAcidGuarantees family=A region=044396069fa7748e35e43f97f084a6ce] 2024-12-12T16:27:33,452 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/2143f7b991ef43e881fd12333e89b911 is 175, key is test_row_0/A:col10/1734020853387/Put/seqid=0 2024-12-12T16:27:33,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is 
added to blk_1073741974_1150 (size=39549) 2024-12-12T16:27:33,496 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=52, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/2143f7b991ef43e881fd12333e89b911 2024-12-12T16:27:33,522 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/202fd29bd37c4265808ce2baeb1d0b36 is 50, key is test_row_0/B:col10/1734020853387/Put/seqid=0 2024-12-12T16:27:33,533 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:33,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020913532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:33,534 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:33,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020913533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:33,535 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:33,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020913533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:33,542 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:33,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55198 deadline: 1734020913540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:33,543 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:33,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020913540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:33,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741975_1151 (size=12001) 2024-12-12T16:27:33,559 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/202fd29bd37c4265808ce2baeb1d0b36 2024-12-12T16:27:33,568 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-12T16:27:33,582 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/f6f4b04463304333a8e9d002828015fc is 50, key is test_row_0/C:col10/1734020853387/Put/seqid=0 2024-12-12T16:27:33,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741976_1152 (size=12001) 2024-12-12T16:27:33,739 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:33,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020913736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:33,739 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:33,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020913737, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:33,740 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:33,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020913739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:33,749 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:33,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55198 deadline: 1734020913744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:33,749 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:33,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020913746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:34,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-12T16:27:34,012 INFO [Thread-694 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 45 completed 2024-12-12T16:27:34,013 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T16:27:34,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees 2024-12-12T16:27:34,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-12T16:27:34,015 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T16:27:34,018 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T16:27:34,018 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=48, ppid=47, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T16:27:34,030 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=52 
(bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/f6f4b04463304333a8e9d002828015fc 2024-12-12T16:27:34,036 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/2143f7b991ef43e881fd12333e89b911 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/2143f7b991ef43e881fd12333e89b911 2024-12-12T16:27:34,042 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/2143f7b991ef43e881fd12333e89b911, entries=200, sequenceid=52, filesize=38.6 K 2024-12-12T16:27:34,046 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:34,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020914044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:34,046 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:34,046 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:34,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020914044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:34,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020914044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:34,052 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/202fd29bd37c4265808ce2baeb1d0b36 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/202fd29bd37c4265808ce2baeb1d0b36 2024-12-12T16:27:34,054 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:34,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55198 deadline: 1734020914050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:34,055 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:34,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020914052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:34,062 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/202fd29bd37c4265808ce2baeb1d0b36, entries=150, sequenceid=52, filesize=11.7 K 2024-12-12T16:27:34,064 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/f6f4b04463304333a8e9d002828015fc as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/f6f4b04463304333a8e9d002828015fc 2024-12-12T16:27:34,071 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/f6f4b04463304333a8e9d002828015fc, entries=150, sequenceid=52, filesize=11.7 K 2024-12-12T16:27:34,072 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 044396069fa7748e35e43f97f084a6ce in 683ms, sequenceid=52, compaction requested=true 2024-12-12T16:27:34,072 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 044396069fa7748e35e43f97f084a6ce: 2024-12-12T16:27:34,072 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 044396069fa7748e35e43f97f084a6ce:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T16:27:34,072 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:27:34,072 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:34,072 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 044396069fa7748e35e43f97f084a6ce:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T16:27:34,072 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 
2024-12-12T16:27:34,072 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:34,072 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 044396069fa7748e35e43f97f084a6ce:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T16:27:34,072 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:27:34,073 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101459 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:27:34,073 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): 044396069fa7748e35e43f97f084a6ce/A is initiating minor compaction (all files) 2024-12-12T16:27:34,073 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:27:34,073 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): 044396069fa7748e35e43f97f084a6ce/B is initiating minor compaction (all files) 2024-12-12T16:27:34,073 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 044396069fa7748e35e43f97f084a6ce/A in TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:34,074 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 044396069fa7748e35e43f97f084a6ce/B in TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 
2024-12-12T16:27:34,074 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/55e53d566c4d4334ab7e4bb03fc43a27, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/9dd0b13de7514e59932a6a6d5175a3d5, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/202fd29bd37c4265808ce2baeb1d0b36] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp, totalSize=35.2 K 2024-12-12T16:27:34,074 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/b1bb9880adbb4181aa9f2ee1f6dd4cfc, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/8ebe1def5afd40b3808246e3975e876b, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/2143f7b991ef43e881fd12333e89b911] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp, totalSize=99.1 K 2024-12-12T16:27:34,074 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:34,074 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 
files: [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/b1bb9880adbb4181aa9f2ee1f6dd4cfc, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/8ebe1def5afd40b3808246e3975e876b, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/2143f7b991ef43e881fd12333e89b911] 2024-12-12T16:27:34,074 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 55e53d566c4d4334ab7e4bb03fc43a27, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1734020851877 2024-12-12T16:27:34,074 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting b1bb9880adbb4181aa9f2ee1f6dd4cfc, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1734020851877 2024-12-12T16:27:34,075 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 9dd0b13de7514e59932a6a6d5175a3d5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1734020851915 2024-12-12T16:27:34,077 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8ebe1def5afd40b3808246e3975e876b, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1734020851915 2024-12-12T16:27:34,077 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 202fd29bd37c4265808ce2baeb1d0b36, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1734020853070 2024-12-12T16:27:34,077 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2143f7b991ef43e881fd12333e89b911, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1734020853070 2024-12-12T16:27:34,092 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 044396069fa7748e35e43f97f084a6ce#B#compaction#132 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:27:34,092 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/4a953b28d65b446680d7c72caed76ad0 is 50, key is test_row_0/B:col10/1734020853387/Put/seqid=0 2024-12-12T16:27:34,097 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=044396069fa7748e35e43f97f084a6ce] 2024-12-12T16:27:34,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-12T16:27:34,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741977_1153 (size=12104) 2024-12-12T16:27:34,118 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412129d6f98e3e00648bd930ce74a1916f05e_044396069fa7748e35e43f97f084a6ce store=[table=TestAcidGuarantees family=A region=044396069fa7748e35e43f97f084a6ce] 2024-12-12T16:27:34,123 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412129d6f98e3e00648bd930ce74a1916f05e_044396069fa7748e35e43f97f084a6ce, store=[table=TestAcidGuarantees family=A region=044396069fa7748e35e43f97f084a6ce] 2024-12-12T16:27:34,124 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412129d6f98e3e00648bd930ce74a1916f05e_044396069fa7748e35e43f97f084a6ce because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=044396069fa7748e35e43f97f084a6ce] 2024-12-12T16:27:34,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741978_1154 (size=4469) 2024-12-12T16:27:34,170 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:34,171 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-12T16:27:34,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 
2024-12-12T16:27:34,172 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2837): Flushing 044396069fa7748e35e43f97f084a6ce 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-12T16:27:34,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=A 2024-12-12T16:27:34,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:34,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=B 2024-12-12T16:27:34,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:34,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=C 2024-12-12T16:27:34,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:34,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212e613105939224c5183e0776d2b035e4d_044396069fa7748e35e43f97f084a6ce is 50, key is test_row_0/A:col10/1734020853428/Put/seqid=0 2024-12-12T16:27:34,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741979_1155 (size=12154) 2024-12-12T16:27:34,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-12T16:27:34,525 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/4a953b28d65b446680d7c72caed76ad0 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/4a953b28d65b446680d7c72caed76ad0 2024-12-12T16:27:34,531 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 044396069fa7748e35e43f97f084a6ce/B of 044396069fa7748e35e43f97f084a6ce into 4a953b28d65b446680d7c72caed76ad0(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T16:27:34,532 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 044396069fa7748e35e43f97f084a6ce: 2024-12-12T16:27:34,532 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce., storeName=044396069fa7748e35e43f97f084a6ce/B, priority=13, startTime=1734020854072; duration=0sec 2024-12-12T16:27:34,532 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:27:34,532 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 044396069fa7748e35e43f97f084a6ce:B 2024-12-12T16:27:34,532 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:27:34,534 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:27:34,534 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): 044396069fa7748e35e43f97f084a6ce/C is initiating minor compaction (all files) 2024-12-12T16:27:34,534 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 044396069fa7748e35e43f97f084a6ce/C in TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:34,534 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/c170f1eb26df44c39eadc2404bcb7683, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/32f68485c48a45bda8b8786d4c1afd3f, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/f6f4b04463304333a8e9d002828015fc] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp, totalSize=35.2 K 2024-12-12T16:27:34,535 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting c170f1eb26df44c39eadc2404bcb7683, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1734020851877 2024-12-12T16:27:34,535 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 32f68485c48a45bda8b8786d4c1afd3f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1734020851915 2024-12-12T16:27:34,536 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting f6f4b04463304333a8e9d002828015fc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1734020853070 2024-12-12T16:27:34,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested 
on 044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:34,555 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. as already flushing 2024-12-12T16:27:34,558 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 044396069fa7748e35e43f97f084a6ce#A#compaction#133 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:27:34,560 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/71d955331f4d486d94e38b97d7d90ba5 is 175, key is test_row_0/A:col10/1734020853387/Put/seqid=0 2024-12-12T16:27:34,565 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:34,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020914564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:34,568 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 044396069fa7748e35e43f97f084a6ce#C#compaction#135 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:27:34,568 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:34,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020914564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:34,570 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/641fcf0e34a34517a7aaefe8c6de162d is 50, key is test_row_0/C:col10/1734020853387/Put/seqid=0 2024-12-12T16:27:34,573 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:34,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55198 deadline: 1734020914565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:34,573 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:34,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020914565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:34,573 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:34,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020914566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:34,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:34,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-12T16:27:34,619 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212e613105939224c5183e0776d2b035e4d_044396069fa7748e35e43f97f084a6ce to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212e613105939224c5183e0776d2b035e4d_044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:34,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/9abc097ce61a4b30b6f4395421c672c8, store: [table=TestAcidGuarantees family=A region=044396069fa7748e35e43f97f084a6ce] 2024-12-12T16:27:34,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741980_1156 (size=31058) 2024-12-12T16:27:34,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741981_1157 (size=12104) 2024-12-12T16:27:34,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/9abc097ce61a4b30b6f4395421c672c8 is 175, key is test_row_0/A:col10/1734020853428/Put/seqid=0 2024-12-12T16:27:34,636 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/641fcf0e34a34517a7aaefe8c6de162d as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/641fcf0e34a34517a7aaefe8c6de162d 2024-12-12T16:27:34,642 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 044396069fa7748e35e43f97f084a6ce/C of 044396069fa7748e35e43f97f084a6ce into 641fcf0e34a34517a7aaefe8c6de162d(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:27:34,642 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 044396069fa7748e35e43f97f084a6ce: 2024-12-12T16:27:34,642 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce., storeName=044396069fa7748e35e43f97f084a6ce/C, priority=13, startTime=1734020854072; duration=0sec 2024-12-12T16:27:34,642 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:34,643 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 044396069fa7748e35e43f97f084a6ce:C 2024-12-12T16:27:34,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741982_1158 (size=30955) 2024-12-12T16:27:34,668 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=77, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/9abc097ce61a4b30b6f4395421c672c8 2024-12-12T16:27:34,669 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:34,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020914668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:34,670 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:34,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020914670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:34,676 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:34,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020914675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:34,676 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:34,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55198 deadline: 1734020914675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:34,677 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:34,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020914676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:34,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/52b43a5df4e6426fa587b9503779a142 is 50, key is test_row_0/B:col10/1734020853428/Put/seqid=0 2024-12-12T16:27:34,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741983_1159 (size=12001) 2024-12-12T16:27:34,711 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/52b43a5df4e6426fa587b9503779a142 2024-12-12T16:27:34,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/eb5c6d553303417c806332dd1586b747 is 50, key is test_row_0/C:col10/1734020853428/Put/seqid=0 2024-12-12T16:27:34,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741984_1160 (size=12001) 2024-12-12T16:27:34,753 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=77 
(bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/eb5c6d553303417c806332dd1586b747 2024-12-12T16:27:34,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/9abc097ce61a4b30b6f4395421c672c8 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/9abc097ce61a4b30b6f4395421c672c8 2024-12-12T16:27:34,770 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/9abc097ce61a4b30b6f4395421c672c8, entries=150, sequenceid=77, filesize=30.2 K 2024-12-12T16:27:34,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/52b43a5df4e6426fa587b9503779a142 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/52b43a5df4e6426fa587b9503779a142 2024-12-12T16:27:34,780 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/52b43a5df4e6426fa587b9503779a142, entries=150, sequenceid=77, filesize=11.7 K 2024-12-12T16:27:34,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/eb5c6d553303417c806332dd1586b747 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/eb5c6d553303417c806332dd1586b747 2024-12-12T16:27:34,793 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/eb5c6d553303417c806332dd1586b747, entries=150, sequenceid=77, filesize=11.7 K 2024-12-12T16:27:34,794 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=67.09 KB/68700 for 044396069fa7748e35e43f97f084a6ce in 622ms, sequenceid=77, compaction requested=false 2024-12-12T16:27:34,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2538): Flush status journal for 
044396069fa7748e35e43f97f084a6ce: 2024-12-12T16:27:34,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:34,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=48 2024-12-12T16:27:34,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4106): Remote procedure done, pid=48 2024-12-12T16:27:34,798 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=48, resume processing ppid=47 2024-12-12T16:27:34,798 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 778 msec 2024-12-12T16:27:34,800 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees in 786 msec 2024-12-12T16:27:34,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:34,875 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 044396069fa7748e35e43f97f084a6ce 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-12T16:27:34,875 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=A 2024-12-12T16:27:34,875 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:34,875 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=B 2024-12-12T16:27:34,875 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:34,875 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=C 2024-12-12T16:27:34,875 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:34,890 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212d5dfe804d60f47fcaf5cfdb45166e6f5_044396069fa7748e35e43f97f084a6ce is 50, key is test_row_0/A:col10/1734020854564/Put/seqid=0 2024-12-12T16:27:34,914 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:34,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55198 deadline: 1734020914907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:34,914 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:34,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020914908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:34,915 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:34,915 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:34,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020914908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:34,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020914910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:34,918 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:34,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020914914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:34,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741985_1161 (size=12154) 2024-12-12T16:27:35,017 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:35,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55198 deadline: 1734020915015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:35,017 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:35,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020915017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:35,023 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:35,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020915022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:35,024 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:35,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020915022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:35,024 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:35,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020915023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:35,032 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/71d955331f4d486d94e38b97d7d90ba5 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/71d955331f4d486d94e38b97d7d90ba5 2024-12-12T16:27:35,040 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 044396069fa7748e35e43f97f084a6ce/A of 044396069fa7748e35e43f97f084a6ce into 71d955331f4d486d94e38b97d7d90ba5(size=30.3 K), total size for store is 60.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T16:27:35,040 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 044396069fa7748e35e43f97f084a6ce: 2024-12-12T16:27:35,040 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce., storeName=044396069fa7748e35e43f97f084a6ce/A, priority=13, startTime=1734020854072; duration=0sec 2024-12-12T16:27:35,041 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:35,041 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 044396069fa7748e35e43f97f084a6ce:A 2024-12-12T16:27:35,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-12T16:27:35,120 INFO [Thread-694 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 47 completed 2024-12-12T16:27:35,122 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T16:27:35,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees 2024-12-12T16:27:35,124 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T16:27:35,124 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T16:27:35,125 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T16:27:35,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-12T16:27:35,173 INFO [master/4f6a4780a2f6:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-12T16:27:35,174 INFO [master/4f6a4780a2f6:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-12T16:27:35,220 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:35,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55198 deadline: 1734020915219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:35,220 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:35,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020915219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:35,226 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:35,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020915225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:35,226 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:35,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020915225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:35,227 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:35,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020915226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:35,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-12T16:27:35,279 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:35,280 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-12T16:27:35,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:35,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. as already flushing 2024-12-12T16:27:35,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:35,281 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:27:35,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:35,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:35,331 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:35,336 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212d5dfe804d60f47fcaf5cfdb45166e6f5_044396069fa7748e35e43f97f084a6ce to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212d5dfe804d60f47fcaf5cfdb45166e6f5_044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:35,338 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/9f88e2c0f8ca4ffc8083e6c999cf8e64, store: [table=TestAcidGuarantees family=A region=044396069fa7748e35e43f97f084a6ce] 2024-12-12T16:27:35,338 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/9f88e2c0f8ca4ffc8083e6c999cf8e64 is 175, key is test_row_0/A:col10/1734020854564/Put/seqid=0 2024-12-12T16:27:35,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741986_1162 (size=30955) 2024-12-12T16:27:35,354 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=93, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/9f88e2c0f8ca4ffc8083e6c999cf8e64 2024-12-12T16:27:35,365 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/e2916e19d81446d59716aa12d28e0905 is 50, key is test_row_0/B:col10/1734020854564/Put/seqid=0 2024-12-12T16:27:35,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741987_1163 (size=12001) 2024-12-12T16:27:35,387 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/e2916e19d81446d59716aa12d28e0905 2024-12-12T16:27:35,401 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/7a994c318ba64baea43a23c23f6f18bc is 50, key is test_row_0/C:col10/1734020854564/Put/seqid=0 
2024-12-12T16:27:35,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741988_1164 (size=12001) 2024-12-12T16:27:35,426 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/7a994c318ba64baea43a23c23f6f18bc 2024-12-12T16:27:35,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-12T16:27:35,433 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/9f88e2c0f8ca4ffc8083e6c999cf8e64 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/9f88e2c0f8ca4ffc8083e6c999cf8e64 2024-12-12T16:27:35,434 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:35,434 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-12T16:27:35,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:35,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. as already flushing 2024-12-12T16:27:35,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:35,435 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:27:35,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:35,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:35,440 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/9f88e2c0f8ca4ffc8083e6c999cf8e64, entries=150, sequenceid=93, filesize=30.2 K 2024-12-12T16:27:35,444 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/e2916e19d81446d59716aa12d28e0905 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/e2916e19d81446d59716aa12d28e0905 2024-12-12T16:27:35,451 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/e2916e19d81446d59716aa12d28e0905, entries=150, sequenceid=93, filesize=11.7 K 2024-12-12T16:27:35,452 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/7a994c318ba64baea43a23c23f6f18bc as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/7a994c318ba64baea43a23c23f6f18bc 2024-12-12T16:27:35,458 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/7a994c318ba64baea43a23c23f6f18bc, entries=150, sequenceid=93, filesize=11.7 K 2024-12-12T16:27:35,459 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 044396069fa7748e35e43f97f084a6ce in 585ms, sequenceid=93, compaction requested=true 2024-12-12T16:27:35,460 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 044396069fa7748e35e43f97f084a6ce: 2024-12-12T16:27:35,460 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:27:35,461 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92968 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:27:35,461 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): 044396069fa7748e35e43f97f084a6ce/A is initiating minor compaction (all files) 2024-12-12T16:27:35,461 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 044396069fa7748e35e43f97f084a6ce/A in TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 
2024-12-12T16:27:35,461 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/71d955331f4d486d94e38b97d7d90ba5, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/9abc097ce61a4b30b6f4395421c672c8, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/9f88e2c0f8ca4ffc8083e6c999cf8e64] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp, totalSize=90.8 K 2024-12-12T16:27:35,461 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:35,462 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. files: [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/71d955331f4d486d94e38b97d7d90ba5, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/9abc097ce61a4b30b6f4395421c672c8, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/9f88e2c0f8ca4ffc8083e6c999cf8e64] 2024-12-12T16:27:35,463 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 71d955331f4d486d94e38b97d7d90ba5, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1734020853070 2024-12-12T16:27:35,463 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9abc097ce61a4b30b6f4395421c672c8, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1734020853421 2024-12-12T16:27:35,464 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9f88e2c0f8ca4ffc8083e6c999cf8e64, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1734020854563 2024-12-12T16:27:35,462 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 044396069fa7748e35e43f97f084a6ce:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T16:27:35,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:35,465 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:27:35,467 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] 
compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:27:35,467 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): 044396069fa7748e35e43f97f084a6ce/B is initiating minor compaction (all files) 2024-12-12T16:27:35,467 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 044396069fa7748e35e43f97f084a6ce/B in TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:35,468 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/4a953b28d65b446680d7c72caed76ad0, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/52b43a5df4e6426fa587b9503779a142, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/e2916e19d81446d59716aa12d28e0905] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp, totalSize=35.3 K 2024-12-12T16:27:35,469 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 4a953b28d65b446680d7c72caed76ad0, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1734020853070 2024-12-12T16:27:35,470 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 52b43a5df4e6426fa587b9503779a142, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1734020853421 2024-12-12T16:27:35,470 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting e2916e19d81446d59716aa12d28e0905, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1734020854563 2024-12-12T16:27:35,477 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=044396069fa7748e35e43f97f084a6ce] 2024-12-12T16:27:35,480 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 044396069fa7748e35e43f97f084a6ce:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T16:27:35,480 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:35,480 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 044396069fa7748e35e43f97f084a6ce:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T16:27:35,480 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:27:35,485 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer 
created=d41d8cd98f00b204e9800998ecf8427e202412121677d34fa3d441e2aaabd3515928cd61_044396069fa7748e35e43f97f084a6ce store=[table=TestAcidGuarantees family=A region=044396069fa7748e35e43f97f084a6ce] 2024-12-12T16:27:35,486 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 044396069fa7748e35e43f97f084a6ce#B#compaction#142 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:27:35,487 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/ab60897030ca4081a9b598ef46236369 is 50, key is test_row_0/B:col10/1734020854564/Put/seqid=0 2024-12-12T16:27:35,500 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412121677d34fa3d441e2aaabd3515928cd61_044396069fa7748e35e43f97f084a6ce, store=[table=TestAcidGuarantees family=A region=044396069fa7748e35e43f97f084a6ce] 2024-12-12T16:27:35,501 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412121677d34fa3d441e2aaabd3515928cd61_044396069fa7748e35e43f97f084a6ce because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=044396069fa7748e35e43f97f084a6ce] 2024-12-12T16:27:35,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741989_1165 (size=12207) 2024-12-12T16:27:35,522 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/ab60897030ca4081a9b598ef46236369 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/ab60897030ca4081a9b598ef46236369 2024-12-12T16:27:35,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741990_1166 (size=4469) 2024-12-12T16:27:35,530 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 044396069fa7748e35e43f97f084a6ce/B of 044396069fa7748e35e43f97f084a6ce into ab60897030ca4081a9b598ef46236369(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T16:27:35,530 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 044396069fa7748e35e43f97f084a6ce: 2024-12-12T16:27:35,530 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce., storeName=044396069fa7748e35e43f97f084a6ce/B, priority=13, startTime=1734020855465; duration=0sec 2024-12-12T16:27:35,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:35,531 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 044396069fa7748e35e43f97f084a6ce 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-12T16:27:35,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=A 2024-12-12T16:27:35,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:35,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=B 2024-12-12T16:27:35,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:35,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=C 2024-12-12T16:27:35,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:35,536 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 044396069fa7748e35e43f97f084a6ce#A#compaction#141 average throughput is 0.41 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:27:35,537 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/5f9f67b6cfb34603947ac5d74fb69b19 is 175, key is test_row_0/A:col10/1734020854564/Put/seqid=0 2024-12-12T16:27:35,531 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:27:35,539 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 044396069fa7748e35e43f97f084a6ce:B 2024-12-12T16:27:35,539 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:27:35,540 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:27:35,540 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): 044396069fa7748e35e43f97f084a6ce/C is initiating minor compaction (all files) 2024-12-12T16:27:35,541 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 044396069fa7748e35e43f97f084a6ce/C in TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:35,541 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/641fcf0e34a34517a7aaefe8c6de162d, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/eb5c6d553303417c806332dd1586b747, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/7a994c318ba64baea43a23c23f6f18bc] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp, totalSize=35.3 K 2024-12-12T16:27:35,544 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 641fcf0e34a34517a7aaefe8c6de162d, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1734020853070 2024-12-12T16:27:35,544 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting eb5c6d553303417c806332dd1586b747, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1734020853421 2024-12-12T16:27:35,545 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 7a994c318ba64baea43a23c23f6f18bc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1734020854563 2024-12-12T16:27:35,553 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:35,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020915547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:35,554 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:35,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020915547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:35,555 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:35,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020915549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:35,557 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:35,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020915551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:35,557 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:35,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55198 deadline: 1734020915552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:35,599 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:35,601 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 044396069fa7748e35e43f97f084a6ce#C#compaction#144 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:27:35,602 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/378708e3b44b49a08bd8e5ff2c1f15e9 is 50, key is test_row_0/C:col10/1734020854564/Put/seqid=0 2024-12-12T16:27:35,603 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-12T16:27:35,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:35,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. as already flushing 2024-12-12T16:27:35,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:35,603 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:35,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:35,605 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212b612f636f48647aaa26a091ec82521ba_044396069fa7748e35e43f97f084a6ce is 50, key is test_row_0/A:col10/1734020854912/Put/seqid=0 2024-12-12T16:27:35,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:35,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741991_1167 (size=31161) 2024-12-12T16:27:35,616 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/5f9f67b6cfb34603947ac5d74fb69b19 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/5f9f67b6cfb34603947ac5d74fb69b19 2024-12-12T16:27:35,623 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 044396069fa7748e35e43f97f084a6ce/A of 044396069fa7748e35e43f97f084a6ce into 5f9f67b6cfb34603947ac5d74fb69b19(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T16:27:35,623 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 044396069fa7748e35e43f97f084a6ce: 2024-12-12T16:27:35,623 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce., storeName=044396069fa7748e35e43f97f084a6ce/A, priority=13, startTime=1734020855460; duration=0sec 2024-12-12T16:27:35,623 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:35,623 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 044396069fa7748e35e43f97f084a6ce:A 2024-12-12T16:27:35,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741992_1168 (size=12207) 2024-12-12T16:27:35,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741993_1169 (size=12154) 2024-12-12T16:27:35,643 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:35,650 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212b612f636f48647aaa26a091ec82521ba_044396069fa7748e35e43f97f084a6ce to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212b612f636f48647aaa26a091ec82521ba_044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:35,651 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/a650423420a04646af6a7b4ca0350170, store: [table=TestAcidGuarantees family=A region=044396069fa7748e35e43f97f084a6ce] 2024-12-12T16:27:35,652 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/a650423420a04646af6a7b4ca0350170 is 175, key is test_row_0/A:col10/1734020854912/Put/seqid=0 2024-12-12T16:27:35,656 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:35,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020915655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:35,658 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:35,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020915656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:35,659 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:35,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020915656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:35,659 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:35,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020915658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:35,660 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:35,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55198 deadline: 1734020915659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:35,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741994_1170 (size=30955) 2024-12-12T16:27:35,670 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=119, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/a650423420a04646af6a7b4ca0350170 2024-12-12T16:27:35,692 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/ece10e06e8a14854b74ea859255b76d9 is 50, key is test_row_0/B:col10/1734020854912/Put/seqid=0 2024-12-12T16:27:35,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741995_1171 (size=12001) 2024-12-12T16:27:35,717 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/ece10e06e8a14854b74ea859255b76d9 2024-12-12T16:27:35,727 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/3aa264f9203b4731a2740e2776491c3d is 50, key is test_row_0/C:col10/1734020854912/Put/seqid=0 2024-12-12T16:27:35,732 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-12T16:27:35,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741996_1172 (size=12001) 2024-12-12T16:27:35,746 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/3aa264f9203b4731a2740e2776491c3d 2024-12-12T16:27:35,752 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/a650423420a04646af6a7b4ca0350170 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/a650423420a04646af6a7b4ca0350170 2024-12-12T16:27:35,757 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:35,758 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-12T16:27:35,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:35,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. as already flushing 2024-12-12T16:27:35,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:35,759 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:27:35,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:35,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:35,760 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/a650423420a04646af6a7b4ca0350170, entries=150, sequenceid=119, filesize=30.2 K 2024-12-12T16:27:35,761 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/ece10e06e8a14854b74ea859255b76d9 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/ece10e06e8a14854b74ea859255b76d9 2024-12-12T16:27:35,771 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/ece10e06e8a14854b74ea859255b76d9, entries=150, sequenceid=119, filesize=11.7 K 2024-12-12T16:27:35,772 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/3aa264f9203b4731a2740e2776491c3d as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/3aa264f9203b4731a2740e2776491c3d 2024-12-12T16:27:35,778 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/3aa264f9203b4731a2740e2776491c3d, entries=150, sequenceid=119, filesize=11.7 K 2024-12-12T16:27:35,780 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 044396069fa7748e35e43f97f084a6ce in 249ms, sequenceid=119, compaction requested=false 2024-12-12T16:27:35,780 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 044396069fa7748e35e43f97f084a6ce: 2024-12-12T16:27:35,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:35,864 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 044396069fa7748e35e43f97f084a6ce 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-12T16:27:35,865 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=A 2024-12-12T16:27:35,865 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:35,865 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=B 2024-12-12T16:27:35,866 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:35,866 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=C 2024-12-12T16:27:35,866 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:35,887 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212c614c8468a2949ddb9e87971f8b8c649_044396069fa7748e35e43f97f084a6ce is 50, key is test_row_0/A:col10/1734020855549/Put/seqid=0 2024-12-12T16:27:35,900 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:35,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020915893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:35,901 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:35,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020915894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:35,902 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:35,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020915897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:35,903 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:35,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020915899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:35,903 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:35,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55198 deadline: 1734020915900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:35,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741997_1173 (size=12304) 2024-12-12T16:27:35,911 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:35,911 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:35,912 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-12T16:27:35,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:35,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. as already flushing 2024-12-12T16:27:35,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:35,912 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:35,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:35,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:35,920 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212c614c8468a2949ddb9e87971f8b8c649_044396069fa7748e35e43f97f084a6ce to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212c614c8468a2949ddb9e87971f8b8c649_044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:35,921 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/3d1edd717ed0456cacff15bf5ee9e645, store: [table=TestAcidGuarantees family=A region=044396069fa7748e35e43f97f084a6ce] 2024-12-12T16:27:35,922 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/3d1edd717ed0456cacff15bf5ee9e645 is 175, key is test_row_0/A:col10/1734020855549/Put/seqid=0 2024-12-12T16:27:35,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741998_1174 (size=31105) 2024-12-12T16:27:36,002 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:36,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020916002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:36,003 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:36,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020916002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:36,007 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:36,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020916005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:36,007 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:36,008 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:36,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55198 deadline: 1734020916005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:36,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020916005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:36,043 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/378708e3b44b49a08bd8e5ff2c1f15e9 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/378708e3b44b49a08bd8e5ff2c1f15e9 2024-12-12T16:27:36,051 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 044396069fa7748e35e43f97f084a6ce/C of 044396069fa7748e35e43f97f084a6ce into 378708e3b44b49a08bd8e5ff2c1f15e9(size=11.9 K), total size for store is 23.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
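Annotation: the RegionTooBusyException entries above ("Over memstore limit=512.0 K") come from HRegion.checkResources, which rejects new writes once a region's memstore passes its blocking limit, i.e. the configured flush size multiplied by the block multiplier. A minimal configuration sketch that would yield a 512 KB blocking limit like the one reported here; the concrete values are illustrative and not necessarily what this test sets:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  static Configuration smallMemstoreConf() {
    Configuration conf = HBaseConfiguration.create();
    // Flush a region's memstore once it reaches 128 KB (the production default is 128 MB).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    // Writes are rejected with RegionTooBusyException once the memstore reaches
    // flush.size * block.multiplier = 512 KB, the "Over memstore limit=512.0 K"
    // threshold reported in the log.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    return conf;
  }
}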
2024-12-12T16:27:36,051 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 044396069fa7748e35e43f97f084a6ce: 2024-12-12T16:27:36,051 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce., storeName=044396069fa7748e35e43f97f084a6ce/C, priority=13, startTime=1734020855480; duration=0sec 2024-12-12T16:27:36,051 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:36,051 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 044396069fa7748e35e43f97f084a6ce:C 2024-12-12T16:27:36,065 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:36,066 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-12T16:27:36,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:36,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. as already flushing 2024-12-12T16:27:36,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:36,066 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:27:36,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:36,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:36,206 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:36,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020916205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:36,207 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:36,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020916205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:36,210 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:36,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020916209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:36,210 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:36,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020916210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:36,211 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:36,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55198 deadline: 1734020916210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:36,218 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:36,219 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-12T16:27:36,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:36,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. as already flushing 2024-12-12T16:27:36,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:36,219 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
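Annotation: on the client side these RegionTooBusyException responses are retryable; the stock HBase client already backs off and retries them internally, so the loop below is only an illustrative sketch of that contract (the connection, retry count, and backoff values are hypothetical):

import java.io.IOException;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

public class BusyRegionRetrySketch {
  // Retry a put a few times when the region reports it is over its memstore limit,
  // giving the in-flight flush time to drain.
  static void putWithBackoff(Connection conn, Put put) throws IOException, InterruptedException {
    try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      long backoffMs = 100;
      for (int attempt = 0; ; attempt++) {
        try {
          table.put(put);
          return;
        } catch (RegionTooBusyException e) {
          if (attempt >= 5) {
            throw e;                 // give up after a handful of attempts
          }
          Thread.sleep(backoffMs);   // back off while the flush completes
          backoffMs *= 2;
        }
      }
    }
  }
}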
2024-12-12T16:27:36,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:36,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:36,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-12T16:27:36,342 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=134, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/3d1edd717ed0456cacff15bf5ee9e645 2024-12-12T16:27:36,356 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/8087b022c8f34e01afb7b0d56a782732 is 50, key is test_row_0/B:col10/1734020855549/Put/seqid=0 2024-12-12T16:27:36,372 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:36,372 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-12T16:27:36,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:36,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. as already flushing 2024-12-12T16:27:36,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:36,373 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
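Annotation: the "Mob store is flushed" and mobdir/... entries above indicate that column family A is MOB-enabled in this run, so DefaultMobStoreFlusher writes oversized cells into MOB files under the mobdir area before committing the regular store file. An illustrative sketch of declaring such a MOB family with the 2.x descriptor builders (the threshold value is arbitrary, not taken from the test):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilySketch {
  static TableDescriptor mobTable() {
    // Cells in family A larger than the threshold are written to MOB files under
    // the mobdir area (as seen in the log) instead of the regular store files.
    ColumnFamilyDescriptor a = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("A"))
        .setMobEnabled(true)
        .setMobThreshold(100L)   // bytes; illustrative, not the test's actual setting
        .build();
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setColumnFamily(a)
        .build();
  }
}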
2024-12-12T16:27:36,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:36,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:36,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741999_1175 (size=12151) 2024-12-12T16:27:36,511 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:36,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020916508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:36,512 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:36,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020916510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:36,513 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:36,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020916511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:36,515 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:36,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55198 deadline: 1734020916515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:36,520 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:36,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020916517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:36,525 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:36,525 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-12T16:27:36,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:36,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. as already flushing 2024-12-12T16:27:36,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:36,525 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:27:36,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:36,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:36,677 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:36,678 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-12T16:27:36,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:36,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. as already flushing 2024-12-12T16:27:36,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:36,679 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:36,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:36,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:27:36,783 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/8087b022c8f34e01afb7b0d56a782732 2024-12-12T16:27:36,792 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/77951a13bd5f43be85c238002360a96d is 50, key is test_row_0/C:col10/1734020855549/Put/seqid=0 2024-12-12T16:27:36,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742000_1176 (size=12151) 2024-12-12T16:27:36,831 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:36,832 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-12T16:27:36,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:36,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. as already flushing 2024-12-12T16:27:36,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:36,832 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:27:36,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:36,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:36,984 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:36,985 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-12T16:27:36,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:36,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. as already flushing 2024-12-12T16:27:36,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:36,985 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:36,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:36,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:37,018 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:37,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020917014, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:37,019 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:37,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020917016, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:37,019 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:37,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020917018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:37,022 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:37,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55198 deadline: 1734020917021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:37,022 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:37,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020917022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:37,138 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:37,138 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-12T16:27:37,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:37,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. as already flushing 2024-12-12T16:27:37,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:37,139 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:37,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:37,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:37,212 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/77951a13bd5f43be85c238002360a96d 2024-12-12T16:27:37,217 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/3d1edd717ed0456cacff15bf5ee9e645 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/3d1edd717ed0456cacff15bf5ee9e645 2024-12-12T16:27:37,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-12T16:27:37,234 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/3d1edd717ed0456cacff15bf5ee9e645, entries=150, sequenceid=134, filesize=30.4 K 2024-12-12T16:27:37,235 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/8087b022c8f34e01afb7b0d56a782732 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/8087b022c8f34e01afb7b0d56a782732 2024-12-12T16:27:37,240 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/8087b022c8f34e01afb7b0d56a782732, entries=150, sequenceid=134, filesize=11.9 K 2024-12-12T16:27:37,241 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/77951a13bd5f43be85c238002360a96d as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/77951a13bd5f43be85c238002360a96d 2024-12-12T16:27:37,248 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/77951a13bd5f43be85c238002360a96d, entries=150, sequenceid=134, filesize=11.9 K 2024-12-12T16:27:37,250 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 044396069fa7748e35e43f97f084a6ce in 1387ms, sequenceid=134, compaction requested=true 2024-12-12T16:27:37,250 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 044396069fa7748e35e43f97f084a6ce: 2024-12-12T16:27:37,251 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 044396069fa7748e35e43f97f084a6ce:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T16:27:37,251 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:37,251 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:27:37,251 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:27:37,251 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 044396069fa7748e35e43f97f084a6ce:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T16:27:37,251 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:37,251 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 044396069fa7748e35e43f97f084a6ce:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T16:27:37,251 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:27:37,252 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93221 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:27:37,252 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): 044396069fa7748e35e43f97f084a6ce/A is initiating minor 
compaction (all files) 2024-12-12T16:27:37,253 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 044396069fa7748e35e43f97f084a6ce/A in TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:37,253 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/5f9f67b6cfb34603947ac5d74fb69b19, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/a650423420a04646af6a7b4ca0350170, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/3d1edd717ed0456cacff15bf5ee9e645] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp, totalSize=91.0 K 2024-12-12T16:27:37,253 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:37,253 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. files: [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/5f9f67b6cfb34603947ac5d74fb69b19, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/a650423420a04646af6a7b4ca0350170, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/3d1edd717ed0456cacff15bf5ee9e645] 2024-12-12T16:27:37,254 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:27:37,254 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5f9f67b6cfb34603947ac5d74fb69b19, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1734020854563 2024-12-12T16:27:37,254 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): 044396069fa7748e35e43f97f084a6ce/B is initiating minor compaction (all files) 2024-12-12T16:27:37,254 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 044396069fa7748e35e43f97f084a6ce/B in TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 
2024-12-12T16:27:37,254 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/ab60897030ca4081a9b598ef46236369, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/ece10e06e8a14854b74ea859255b76d9, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/8087b022c8f34e01afb7b0d56a782732] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp, totalSize=35.5 K 2024-12-12T16:27:37,254 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting a650423420a04646af6a7b4ca0350170, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1734020854906 2024-12-12T16:27:37,255 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting ab60897030ca4081a9b598ef46236369, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1734020854563 2024-12-12T16:27:37,255 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3d1edd717ed0456cacff15bf5ee9e645, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1734020855549 2024-12-12T16:27:37,255 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting ece10e06e8a14854b74ea859255b76d9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1734020854906 2024-12-12T16:27:37,256 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 8087b022c8f34e01afb7b0d56a782732, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1734020855549 2024-12-12T16:27:37,265 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=044396069fa7748e35e43f97f084a6ce] 2024-12-12T16:27:37,277 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412128f01bbceb9ab4ed0a00cacd632bb0397_044396069fa7748e35e43f97f084a6ce store=[table=TestAcidGuarantees family=A region=044396069fa7748e35e43f97f084a6ce] 2024-12-12T16:27:37,278 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 044396069fa7748e35e43f97f084a6ce#B#compaction#151 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:27:37,279 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/7fb684ebbc294f24a3f6c0da5b7ecc67 is 50, key is test_row_0/B:col10/1734020855549/Put/seqid=0 2024-12-12T16:27:37,280 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412128f01bbceb9ab4ed0a00cacd632bb0397_044396069fa7748e35e43f97f084a6ce, store=[table=TestAcidGuarantees family=A region=044396069fa7748e35e43f97f084a6ce] 2024-12-12T16:27:37,280 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412128f01bbceb9ab4ed0a00cacd632bb0397_044396069fa7748e35e43f97f084a6ce because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=044396069fa7748e35e43f97f084a6ce] 2024-12-12T16:27:37,291 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:37,292 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-12T16:27:37,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 
2024-12-12T16:27:37,292 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2837): Flushing 044396069fa7748e35e43f97f084a6ce 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-12T16:27:37,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=A 2024-12-12T16:27:37,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:37,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=B 2024-12-12T16:27:37,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:37,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=C 2024-12-12T16:27:37,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:37,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742002_1178 (size=4469) 2024-12-12T16:27:37,305 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 044396069fa7748e35e43f97f084a6ce#A#compaction#150 average throughput is 0.61 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:27:37,306 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/57a3393156474797995e385c91c1d793 is 175, key is test_row_0/A:col10/1734020855549/Put/seqid=0 2024-12-12T16:27:37,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742001_1177 (size=12459) 2024-12-12T16:27:37,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212e16a14ad85b343838ef575c0fa1c4d05_044396069fa7748e35e43f97f084a6ce is 50, key is test_row_0/A:col10/1734020855895/Put/seqid=0 2024-12-12T16:27:37,320 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/7fb684ebbc294f24a3f6c0da5b7ecc67 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/7fb684ebbc294f24a3f6c0da5b7ecc67 2024-12-12T16:27:37,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742003_1179 (size=31413) 2024-12-12T16:27:37,331 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 044396069fa7748e35e43f97f084a6ce/B of 044396069fa7748e35e43f97f084a6ce into 7fb684ebbc294f24a3f6c0da5b7ecc67(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T16:27:37,332 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 044396069fa7748e35e43f97f084a6ce: 2024-12-12T16:27:37,332 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce., storeName=044396069fa7748e35e43f97f084a6ce/B, priority=13, startTime=1734020857251; duration=0sec 2024-12-12T16:27:37,333 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:27:37,333 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 044396069fa7748e35e43f97f084a6ce:B 2024-12-12T16:27:37,333 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:27:37,334 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:27:37,334 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): 044396069fa7748e35e43f97f084a6ce/C is initiating minor compaction (all files) 2024-12-12T16:27:37,334 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 044396069fa7748e35e43f97f084a6ce/C in TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:37,334 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/378708e3b44b49a08bd8e5ff2c1f15e9, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/3aa264f9203b4731a2740e2776491c3d, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/77951a13bd5f43be85c238002360a96d] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp, totalSize=35.5 K 2024-12-12T16:27:37,335 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 378708e3b44b49a08bd8e5ff2c1f15e9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1734020854563 2024-12-12T16:27:37,335 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 3aa264f9203b4731a2740e2776491c3d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1734020854906 2024-12-12T16:27:37,336 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 77951a13bd5f43be85c238002360a96d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1734020855549 2024-12-12T16:27:37,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is 
added to blk_1073742004_1180 (size=12304) 2024-12-12T16:27:37,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,353 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212e16a14ad85b343838ef575c0fa1c4d05_044396069fa7748e35e43f97f084a6ce to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212e16a14ad85b343838ef575c0fa1c4d05_044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:37,353 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 044396069fa7748e35e43f97f084a6ce#C#compaction#153 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:27:37,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/e0b2c7ab390c4ed3afa867ac24bd07f2, store: [table=TestAcidGuarantees family=A region=044396069fa7748e35e43f97f084a6ce] 2024-12-12T16:27:37,354 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/ea46d63b05cd4850aacc321092f4481b is 50, key is test_row_0/C:col10/1734020855549/Put/seqid=0 2024-12-12T16:27:37,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/e0b2c7ab390c4ed3afa867ac24bd07f2 is 175, key is test_row_0/A:col10/1734020855895/Put/seqid=0 2024-12-12T16:27:37,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742005_1181 (size=31105) 2024-12-12T16:27:37,379 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=157, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/e0b2c7ab390c4ed3afa867ac24bd07f2 2024-12-12T16:27:37,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742006_1182 (size=12459) 2024-12-12T16:27:37,394 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/ea46d63b05cd4850aacc321092f4481b as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/ea46d63b05cd4850aacc321092f4481b 2024-12-12T16:27:37,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/f946372aae2b4a25bb2f5f94ddb3f9ee is 50, key is test_row_0/B:col10/1734020855895/Put/seqid=0 2024-12-12T16:27:37,404 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 044396069fa7748e35e43f97f084a6ce/C of 044396069fa7748e35e43f97f084a6ce into ea46d63b05cd4850aacc321092f4481b(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:27:37,404 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 044396069fa7748e35e43f97f084a6ce: 2024-12-12T16:27:37,404 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce., storeName=044396069fa7748e35e43f97f084a6ce/C, priority=13, startTime=1734020857251; duration=0sec 2024-12-12T16:27:37,404 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:37,404 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 044396069fa7748e35e43f97f084a6ce:C 2024-12-12T16:27:37,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742007_1183 (size=12151) 2024-12-12T16:27:37,410 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/f946372aae2b4a25bb2f5f94ddb3f9ee 2024-12-12T16:27:37,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/8fadccfbe23643508fdd1c28fbb94bfe is 50, key is test_row_0/C:col10/1734020855895/Put/seqid=0 2024-12-12T16:27:37,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742008_1184 (size=12151) 2024-12-12T16:27:37,438 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=157 (bloomFilter=true), 
to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/8fadccfbe23643508fdd1c28fbb94bfe 2024-12-12T16:27:37,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/e0b2c7ab390c4ed3afa867ac24bd07f2 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/e0b2c7ab390c4ed3afa867ac24bd07f2 2024-12-12T16:27:37,457 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/e0b2c7ab390c4ed3afa867ac24bd07f2, entries=150, sequenceid=157, filesize=30.4 K 2024-12-12T16:27:37,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/f946372aae2b4a25bb2f5f94ddb3f9ee as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/f946372aae2b4a25bb2f5f94ddb3f9ee 2024-12-12T16:27:37,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,464 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/f946372aae2b4a25bb2f5f94ddb3f9ee, entries=150, sequenceid=157, filesize=11.9 K 2024-12-12T16:27:37,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/8fadccfbe23643508fdd1c28fbb94bfe as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/8fadccfbe23643508fdd1c28fbb94bfe 2024-12-12T16:27:37,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,478 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/8fadccfbe23643508fdd1c28fbb94bfe, entries=150, sequenceid=157, filesize=11.9 K 2024-12-12T16:27:37,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,480 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=0 B/0 for 044396069fa7748e35e43f97f084a6ce in 188ms, sequenceid=157, compaction requested=false 2024-12-12T16:27:37,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2538): Flush status journal for 044396069fa7748e35e43f97f084a6ce: 
2024-12-12T16:27:37,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:37,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=50 2024-12-12T16:27:37,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4106): Remote procedure done, pid=50 2024-12-12T16:27:37,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,484 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49 2024-12-12T16:27:37,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,484 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3570 sec 2024-12-12T16:27:37,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,485 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,486 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees in 2.3620 sec 2024-12-12T16:27:37,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,490 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,495 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,501 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,506 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... identical DEBUG entries from storefiletracker.StoreFileTrackerFactory(122), "instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker", repeat continuously across RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 (queue=0, port=41933) with timestamps running from 2024-12-12T16:27:37,507 through 2024-12-12T16:27:37,610 ...]
2024-12-12T16:27:37,610 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,617 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,622 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,627 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,632 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,639 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,643 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,647 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,651 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,656 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,660 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,664 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,668 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,672 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T16:27:37,736 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/57a3393156474797995e385c91c1d793 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/57a3393156474797995e385c91c1d793
2024-12-12T16:27:37,743 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 044396069fa7748e35e43f97f084a6ce/A of 044396069fa7748e35e43f97f084a6ce into 57a3393156474797995e385c91c1d793(size=30.7 K), total size for store is 61.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-12T16:27:37,743 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 044396069fa7748e35e43f97f084a6ce:
2024-12-12T16:27:37,743 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce., storeName=044396069fa7748e35e43f97f084a6ce/A, priority=13, startTime=1734020857251; duration=0sec
2024-12-12T16:27:37,743 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-12T16:27:37,743 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 044396069fa7748e35e43f97f084a6ce:A
2024-12-12T16:27:37,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
[... identical DEBUG entries omitted: storefiletracker.StoreFileTrackerFactory(122) instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker, logged repeatedly by RpcServer.default.FPBQ.Fifo handlers 0-2 (queue=0, port=41933) over the interval 2024-12-12T16:27:37,868 to 2024-12-12T16:27:37,947 ...]
2024-12-12T16:27:37,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:37,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
[identical DEBUG entry — storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker — logged repeatedly by RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 (queue=0, port=41933) between 2024-12-12T16:27:37,996 and 2024-12-12T16:27:38,086] 
2024-12-12T16:27:38,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:38,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,108 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 044396069fa7748e35e43f97f084a6ce 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T16:27:38,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,109 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=A 2024-12-12T16:27:38,110 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:38,110 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=B 2024-12-12T16:27:38,110 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:38,110 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=C 2024-12-12T16:27:38,110 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:38,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,139 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212f5a2226c34354dbe8252868dfb2b54b0_044396069fa7748e35e43f97f084a6ce is 50, key is test_row_0/A:col10/1734020858107/Put/seqid=0 2024-12-12T16:27:38,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,154 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:38,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020918149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:38,154 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:38,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55198 deadline: 1734020918151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:38,154 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:38,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020918152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:38,155 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:38,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020918153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:38,155 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:38,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020918154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:38,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742010_1186 (size=27248) 2024-12-12T16:27:38,256 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:38,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020918255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:38,256 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:38,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55198 deadline: 1734020918255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:38,257 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:38,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020918255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:38,257 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:38,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020918256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:38,257 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:38,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020918256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:38,458 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:38,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55198 deadline: 1734020918458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:38,460 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:38,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020918460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:38,461 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:38,461 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:38,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020918460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:38,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020918460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:38,462 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:38,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020918460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:38,571 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:38,577 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212f5a2226c34354dbe8252868dfb2b54b0_044396069fa7748e35e43f97f084a6ce to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212f5a2226c34354dbe8252868dfb2b54b0_044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:38,578 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/2c5a1f887cf04e61aaa99bd06df3fe1b, store: [table=TestAcidGuarantees family=A region=044396069fa7748e35e43f97f084a6ce] 2024-12-12T16:27:38,579 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/2c5a1f887cf04e61aaa99bd06df3fe1b is 175, key is test_row_0/A:col10/1734020858107/Put/seqid=0 2024-12-12T16:27:38,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742009_1185 (size=83035) 2024-12-12T16:27:38,582 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=172, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/2c5a1f887cf04e61aaa99bd06df3fe1b 2024-12-12T16:27:38,612 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/be7f766b92724b2e9a48ca8127885fd6 is 50, key is test_row_0/B:col10/1734020858107/Put/seqid=0 2024-12-12T16:27:38,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742011_1187 
(size=12151) 2024-12-12T16:27:38,762 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:38,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55198 deadline: 1734020918760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:38,764 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:38,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020918763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:38,765 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:38,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020918764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:38,765 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:38,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020918764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:38,766 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:38,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020918764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:39,032 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/be7f766b92724b2e9a48ca8127885fd6 2024-12-12T16:27:39,044 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/48ebb81395004f22a81656c157e05bf8 is 50, key is test_row_0/C:col10/1734020858107/Put/seqid=0 2024-12-12T16:27:39,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742012_1188 (size=12151) 2024-12-12T16:27:39,051 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/48ebb81395004f22a81656c157e05bf8 2024-12-12T16:27:39,056 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/2c5a1f887cf04e61aaa99bd06df3fe1b as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/2c5a1f887cf04e61aaa99bd06df3fe1b 2024-12-12T16:27:39,061 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/2c5a1f887cf04e61aaa99bd06df3fe1b, entries=450, sequenceid=172, filesize=81.1 K 2024-12-12T16:27:39,068 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/be7f766b92724b2e9a48ca8127885fd6 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/be7f766b92724b2e9a48ca8127885fd6 2024-12-12T16:27:39,074 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/be7f766b92724b2e9a48ca8127885fd6, entries=150, sequenceid=172, filesize=11.9 K 2024-12-12T16:27:39,075 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/48ebb81395004f22a81656c157e05bf8 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/48ebb81395004f22a81656c157e05bf8 2024-12-12T16:27:39,080 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/48ebb81395004f22a81656c157e05bf8, entries=150, sequenceid=172, filesize=11.9 K 2024-12-12T16:27:39,082 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 044396069fa7748e35e43f97f084a6ce in 974ms, sequenceid=172, compaction requested=true 2024-12-12T16:27:39,082 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 044396069fa7748e35e43f97f084a6ce: 2024-12-12T16:27:39,083 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:27:39,083 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 044396069fa7748e35e43f97f084a6ce:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T16:27:39,083 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:39,084 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:27:39,084 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 044396069fa7748e35e43f97f084a6ce:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T16:27:39,084 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:39,084 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 044396069fa7748e35e43f97f084a6ce:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T16:27:39,084 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:27:39,085 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 145553 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:27:39,085 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): 
044396069fa7748e35e43f97f084a6ce/A is initiating minor compaction (all files) 2024-12-12T16:27:39,085 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 044396069fa7748e35e43f97f084a6ce/A in TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:39,085 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/57a3393156474797995e385c91c1d793, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/e0b2c7ab390c4ed3afa867ac24bd07f2, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/2c5a1f887cf04e61aaa99bd06df3fe1b] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp, totalSize=142.1 K 2024-12-12T16:27:39,085 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:39,085 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. files: [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/57a3393156474797995e385c91c1d793, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/e0b2c7ab390c4ed3afa867ac24bd07f2, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/2c5a1f887cf04e61aaa99bd06df3fe1b] 2024-12-12T16:27:39,086 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:27:39,086 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 57a3393156474797995e385c91c1d793, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1734020855549 2024-12-12T16:27:39,086 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): 044396069fa7748e35e43f97f084a6ce/B is initiating minor compaction (all files) 2024-12-12T16:27:39,086 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 044396069fa7748e35e43f97f084a6ce/B in TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 
2024-12-12T16:27:39,086 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/7fb684ebbc294f24a3f6c0da5b7ecc67, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/f946372aae2b4a25bb2f5f94ddb3f9ee, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/be7f766b92724b2e9a48ca8127885fd6] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp, totalSize=35.9 K 2024-12-12T16:27:39,086 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting e0b2c7ab390c4ed3afa867ac24bd07f2, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1734020855895 2024-12-12T16:27:39,087 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 7fb684ebbc294f24a3f6c0da5b7ecc67, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1734020855549 2024-12-12T16:27:39,087 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2c5a1f887cf04e61aaa99bd06df3fe1b, keycount=450, bloomtype=ROW, size=81.1 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1734020858061 2024-12-12T16:27:39,087 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting f946372aae2b4a25bb2f5f94ddb3f9ee, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1734020855895 2024-12-12T16:27:39,088 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting be7f766b92724b2e9a48ca8127885fd6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1734020858073 2024-12-12T16:27:39,101 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=044396069fa7748e35e43f97f084a6ce] 2024-12-12T16:27:39,104 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 044396069fa7748e35e43f97f084a6ce#B#compaction#160 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:27:39,105 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/8de13515a3024316b977b94c1db5951b is 50, key is test_row_0/B:col10/1734020858107/Put/seqid=0 2024-12-12T16:27:39,122 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412126c39d5cb3aac435dab70436831f25ea0_044396069fa7748e35e43f97f084a6ce store=[table=TestAcidGuarantees family=A region=044396069fa7748e35e43f97f084a6ce] 2024-12-12T16:27:39,130 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412126c39d5cb3aac435dab70436831f25ea0_044396069fa7748e35e43f97f084a6ce, store=[table=TestAcidGuarantees family=A region=044396069fa7748e35e43f97f084a6ce] 2024-12-12T16:27:39,131 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412126c39d5cb3aac435dab70436831f25ea0_044396069fa7748e35e43f97f084a6ce because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=044396069fa7748e35e43f97f084a6ce] 2024-12-12T16:27:39,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742013_1189 (size=12561) 2024-12-12T16:27:39,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742014_1190 (size=4469) 2024-12-12T16:27:39,216 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-12T16:27:39,216 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-12-12T16:27:39,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-12T16:27:39,235 INFO [Thread-694 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 49 completed 2024-12-12T16:27:39,236 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T16:27:39,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees 2024-12-12T16:27:39,238 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T16:27:39,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done 
pid=51 2024-12-12T16:27:39,239 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T16:27:39,239 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=52, ppid=51, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T16:27:39,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:39,269 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 044396069fa7748e35e43f97f084a6ce 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-12T16:27:39,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=A 2024-12-12T16:27:39,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:39,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=B 2024-12-12T16:27:39,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:39,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=C 2024-12-12T16:27:39,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:39,283 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:39,283 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212021ca684a1d3400b9175e4240b4270f7_044396069fa7748e35e43f97f084a6ce is 50, key is test_row_0/A:col10/1734020858152/Put/seqid=0 2024-12-12T16:27:39,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020919277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:39,284 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:39,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020919277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:39,285 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:39,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020919278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:39,285 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:39,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55198 deadline: 1734020919278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:39,285 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:39,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020919279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:39,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742015_1191 (size=14794) 2024-12-12T16:27:39,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-12T16:27:39,385 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:39,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020919385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:39,386 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:39,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020919386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:39,391 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:39,392 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-12T16:27:39,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:39,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. as already flushing 2024-12-12T16:27:39,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:39,392 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:27:39,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:39,393 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:39,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:39,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55198 deadline: 1734020919386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:39,393 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:39,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020919386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:39,393 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:39,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020919387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:39,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-12T16:27:39,545 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:39,545 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-12T16:27:39,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:39,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. as already flushing 2024-12-12T16:27:39,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:39,546 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:27:39,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:39,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:39,559 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/8de13515a3024316b977b94c1db5951b as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/8de13515a3024316b977b94c1db5951b 2024-12-12T16:27:39,560 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 044396069fa7748e35e43f97f084a6ce#A#compaction#159 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:27:39,561 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/70717b8ab4d147449c2aa55c309e48d3 is 175, key is test_row_0/A:col10/1734020858107/Put/seqid=0 2024-12-12T16:27:39,573 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 044396069fa7748e35e43f97f084a6ce/B of 044396069fa7748e35e43f97f084a6ce into 8de13515a3024316b977b94c1db5951b(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T16:27:39,573 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 044396069fa7748e35e43f97f084a6ce: 2024-12-12T16:27:39,573 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce., storeName=044396069fa7748e35e43f97f084a6ce/B, priority=13, startTime=1734020859084; duration=0sec 2024-12-12T16:27:39,573 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:27:39,573 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 044396069fa7748e35e43f97f084a6ce:B 2024-12-12T16:27:39,574 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:27:39,575 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:27:39,576 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): 044396069fa7748e35e43f97f084a6ce/C is initiating minor compaction (all files) 2024-12-12T16:27:39,576 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 044396069fa7748e35e43f97f084a6ce/C in TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:39,576 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/ea46d63b05cd4850aacc321092f4481b, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/8fadccfbe23643508fdd1c28fbb94bfe, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/48ebb81395004f22a81656c157e05bf8] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp, totalSize=35.9 K 2024-12-12T16:27:39,576 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting ea46d63b05cd4850aacc321092f4481b, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1734020855549 2024-12-12T16:27:39,577 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 8fadccfbe23643508fdd1c28fbb94bfe, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1734020855895 2024-12-12T16:27:39,577 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 48ebb81395004f22a81656c157e05bf8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1734020858073 2024-12-12T16:27:39,588 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:39,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020919588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:39,589 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:39,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020919588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:39,597 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:39,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020919595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:39,598 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:39,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020919596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:39,598 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:39,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55198 deadline: 1734020919596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:39,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742016_1192 (size=31515) 2024-12-12T16:27:39,604 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 044396069fa7748e35e43f97f084a6ce#C#compaction#162 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:27:39,605 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/009dbe2e3a75470c8f9753ae1f439152 is 50, key is test_row_0/C:col10/1734020858107/Put/seqid=0 2024-12-12T16:27:39,615 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/70717b8ab4d147449c2aa55c309e48d3 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/70717b8ab4d147449c2aa55c309e48d3 2024-12-12T16:27:39,623 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 044396069fa7748e35e43f97f084a6ce/A of 044396069fa7748e35e43f97f084a6ce into 70717b8ab4d147449c2aa55c309e48d3(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T16:27:39,623 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 044396069fa7748e35e43f97f084a6ce: 2024-12-12T16:27:39,623 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce., storeName=044396069fa7748e35e43f97f084a6ce/A, priority=13, startTime=1734020859083; duration=0sec 2024-12-12T16:27:39,623 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:39,623 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 044396069fa7748e35e43f97f084a6ce:A 2024-12-12T16:27:39,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742017_1193 (size=12561) 2024-12-12T16:27:39,697 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:39,698 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-12T16:27:39,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:39,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. as already flushing 2024-12-12T16:27:39,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:39,698 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:27:39,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:39,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:39,704 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:39,710 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212021ca684a1d3400b9175e4240b4270f7_044396069fa7748e35e43f97f084a6ce to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212021ca684a1d3400b9175e4240b4270f7_044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:39,712 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/864dd3d65a6d4bb1b45ab0981eac8a3b, store: [table=TestAcidGuarantees family=A region=044396069fa7748e35e43f97f084a6ce] 2024-12-12T16:27:39,713 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/864dd3d65a6d4bb1b45ab0981eac8a3b is 175, key is test_row_0/A:col10/1734020858152/Put/seqid=0 2024-12-12T16:27:39,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742018_1194 (size=39749) 2024-12-12T16:27:39,748 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=198, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/864dd3d65a6d4bb1b45ab0981eac8a3b 2024-12-12T16:27:39,770 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/0fbd022afd984d07a33a88d83d92ac5c is 50, key is test_row_0/B:col10/1734020858152/Put/seqid=0 2024-12-12T16:27:39,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742019_1195 (size=12151) 2024-12-12T16:27:39,799 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=198 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/0fbd022afd984d07a33a88d83d92ac5c 2024-12-12T16:27:39,816 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/daf53761b6d2405e921bdd58dd84f6fa is 50, key is test_row_0/C:col10/1734020858152/Put/seqid=0 
2024-12-12T16:27:39,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-12T16:27:39,850 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:39,851 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-12T16:27:39,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:39,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. as already flushing 2024-12-12T16:27:39,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:39,852 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:39,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:39,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:39,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742020_1196 (size=12151) 2024-12-12T16:27:39,887 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=198 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/daf53761b6d2405e921bdd58dd84f6fa 2024-12-12T16:27:39,893 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:39,893 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:39,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020919891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:39,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020919892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:39,896 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/864dd3d65a6d4bb1b45ab0981eac8a3b as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/864dd3d65a6d4bb1b45ab0981eac8a3b 2024-12-12T16:27:39,901 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/864dd3d65a6d4bb1b45ab0981eac8a3b, entries=200, sequenceid=198, filesize=38.8 K 2024-12-12T16:27:39,903 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:39,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020919898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:39,905 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/0fbd022afd984d07a33a88d83d92ac5c as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/0fbd022afd984d07a33a88d83d92ac5c 2024-12-12T16:27:39,906 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:39,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55198 deadline: 1734020919901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:39,904 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:39,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020919899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:39,911 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/0fbd022afd984d07a33a88d83d92ac5c, entries=150, sequenceid=198, filesize=11.9 K 2024-12-12T16:27:39,912 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/daf53761b6d2405e921bdd58dd84f6fa as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/daf53761b6d2405e921bdd58dd84f6fa 2024-12-12T16:27:39,917 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/daf53761b6d2405e921bdd58dd84f6fa, entries=150, sequenceid=198, filesize=11.9 K 2024-12-12T16:27:39,918 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 044396069fa7748e35e43f97f084a6ce in 649ms, sequenceid=198, compaction requested=false 2024-12-12T16:27:39,918 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 044396069fa7748e35e43f97f084a6ce: 2024-12-12T16:27:40,004 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:40,004 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing 
remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-12T16:27:40,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:40,005 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2837): Flushing 044396069fa7748e35e43f97f084a6ce 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-12T16:27:40,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=A 2024-12-12T16:27:40,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:40,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=B 2024-12-12T16:27:40,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:40,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=C 2024-12-12T16:27:40,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:40,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212f5ecc4c47b494b159c3a8af4c103b8d8_044396069fa7748e35e43f97f084a6ce is 50, key is test_row_0/A:col10/1734020859276/Put/seqid=0 2024-12-12T16:27:40,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742021_1197 (size=12304) 2024-12-12T16:27:40,042 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/009dbe2e3a75470c8f9753ae1f439152 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/009dbe2e3a75470c8f9753ae1f439152 2024-12-12T16:27:40,048 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 044396069fa7748e35e43f97f084a6ce/C of 044396069fa7748e35e43f97f084a6ce into 009dbe2e3a75470c8f9753ae1f439152(size=12.3 K), total size for store is 24.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T16:27:40,048 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 044396069fa7748e35e43f97f084a6ce: 2024-12-12T16:27:40,048 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce., storeName=044396069fa7748e35e43f97f084a6ce/C, priority=13, startTime=1734020859084; duration=0sec 2024-12-12T16:27:40,049 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:40,049 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 044396069fa7748e35e43f97f084a6ce:C 2024-12-12T16:27:40,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-12T16:27:40,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:40,399 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. as already flushing 2024-12-12T16:27:40,423 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:40,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55198 deadline: 1734020920419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:40,424 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:40,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020920420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:40,424 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:40,424 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:40,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020920421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:40,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020920422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:40,427 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:40,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020920423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:40,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:40,440 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212f5ecc4c47b494b159c3a8af4c103b8d8_044396069fa7748e35e43f97f084a6ce to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212f5ecc4c47b494b159c3a8af4c103b8d8_044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:40,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/b1db9123062b4e0c88754190035c7c0c, store: [table=TestAcidGuarantees family=A region=044396069fa7748e35e43f97f084a6ce] 2024-12-12T16:27:40,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/b1db9123062b4e0c88754190035c7c0c is 175, key is test_row_0/A:col10/1734020859276/Put/seqid=0 2024-12-12T16:27:40,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742022_1198 (size=31105) 2024-12-12T16:27:40,528 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:40,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020920525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:40,530 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:40,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020920528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:40,531 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:40,531 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:40,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020920529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:40,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55198 deadline: 1734020920528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:40,531 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:40,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020920529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:40,732 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:40,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020920732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:40,733 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:40,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020920733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:40,735 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:40,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020920733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:40,736 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:40,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55198 deadline: 1734020920733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:40,736 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:40,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020920735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:40,848 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=210, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/b1db9123062b4e0c88754190035c7c0c 2024-12-12T16:27:40,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/5779e1f2410c4df69730fc01f976b2a4 is 50, key is test_row_0/B:col10/1734020859276/Put/seqid=0 2024-12-12T16:27:40,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742023_1199 (size=12151) 2024-12-12T16:27:41,034 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:41,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020921034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:41,038 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:41,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020921036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:41,039 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:41,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020921037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:41,040 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:41,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020921039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:41,040 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:41,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55198 deadline: 1734020921039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:41,273 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/5779e1f2410c4df69730fc01f976b2a4 2024-12-12T16:27:41,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/89cbc37040bb4656aef218a03c0c8258 is 50, key is test_row_0/C:col10/1734020859276/Put/seqid=0 2024-12-12T16:27:41,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742024_1200 (size=12151) 2024-12-12T16:27:41,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-12T16:27:41,539 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:41,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020921538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:41,542 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:41,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020921541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:41,543 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:41,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020921543, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:41,545 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:41,546 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:41,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55198 deadline: 1734020921544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:41,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020921544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:41,687 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/89cbc37040bb4656aef218a03c0c8258 2024-12-12T16:27:41,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/b1db9123062b4e0c88754190035c7c0c as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/b1db9123062b4e0c88754190035c7c0c 2024-12-12T16:27:41,697 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/b1db9123062b4e0c88754190035c7c0c, entries=150, sequenceid=210, filesize=30.4 K 2024-12-12T16:27:41,698 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/5779e1f2410c4df69730fc01f976b2a4 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/5779e1f2410c4df69730fc01f976b2a4 2024-12-12T16:27:41,703 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/5779e1f2410c4df69730fc01f976b2a4, entries=150, sequenceid=210, filesize=11.9 K 2024-12-12T16:27:41,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/89cbc37040bb4656aef218a03c0c8258 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/89cbc37040bb4656aef218a03c0c8258 2024-12-12T16:27:41,710 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/89cbc37040bb4656aef218a03c0c8258, entries=150, sequenceid=210, filesize=11.9 K 2024-12-12T16:27:41,711 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 044396069fa7748e35e43f97f084a6ce in 1706ms, sequenceid=210, compaction requested=true 2024-12-12T16:27:41,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2538): Flush status journal for 044396069fa7748e35e43f97f084a6ce: 2024-12-12T16:27:41,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 
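The flush above drains ~46.96 KB and immediately requests a compaction while writers are still being rejected. The recurring "Over memstore limit=512.0 K" figure is the region's memstore blocking limit, which HBase derives from the per-region flush size and the block multiplier. The values below are hypothetical, chosen only so the product works out to 512 K for illustration; the actual settings used by this test are not visible in the log:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitConfigSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Trigger a flush once a region's memstore reaches 128 KB ...
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        // ... and block new updates once it grows past 4x that size (512 KB),
        // the point at which checkResources throws RegionTooBusyException.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
        System.out.println("Blocking limit: " + blockingLimit / 1024 + " K");
    }
}
```

Once the flush finishes and the memstore drops back under that limit, the handlers stop logging RegionTooBusyException and the pending mutations go through, which is the pattern visible in the remainder of this section.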
2024-12-12T16:27:41,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-12-12T16:27:41,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4106): Remote procedure done, pid=52 2024-12-12T16:27:41,714 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=52, resume processing ppid=51 2024-12-12T16:27:41,714 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.4740 sec 2024-12-12T16:27:41,716 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees in 2.4790 sec 2024-12-12T16:27:42,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:42,549 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 044396069fa7748e35e43f97f084a6ce 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-12T16:27:42,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=A 2024-12-12T16:27:42,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:42,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=B 2024-12-12T16:27:42,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:42,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=C 2024-12-12T16:27:42,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:42,557 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:42,557 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:42,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020922553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:42,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020922553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:42,557 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:42,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020922554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:42,560 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:42,560 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:42,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020922558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:42,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55198 deadline: 1734020922557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:42,566 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212172ac6630b8f4a8dbfad8b5ea379176b_044396069fa7748e35e43f97f084a6ce is 50, key is test_row_0/A:col10/1734020862547/Put/seqid=0 2024-12-12T16:27:42,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742025_1201 (size=14794) 2024-12-12T16:27:42,659 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:42,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020922658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:42,661 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:42,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020922658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:42,661 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:42,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020922659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:42,664 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:42,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020922662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:42,665 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:42,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55198 deadline: 1734020922662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:42,861 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:42,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020922860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:42,867 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:42,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020922863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:42,867 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:42,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020922863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:42,867 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:42,867 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:42,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020922867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:42,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55198 deadline: 1734020922867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:42,994 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:42,999 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212172ac6630b8f4a8dbfad8b5ea379176b_044396069fa7748e35e43f97f084a6ce to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212172ac6630b8f4a8dbfad8b5ea379176b_044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:43,000 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/0bc11d64afbb49c290ad22a73c8519ef, store: [table=TestAcidGuarantees family=A region=044396069fa7748e35e43f97f084a6ce] 2024-12-12T16:27:43,001 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/0bc11d64afbb49c290ad22a73c8519ef is 175, key is test_row_0/A:col10/1734020862547/Put/seqid=0 2024-12-12T16:27:43,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742026_1202 (size=39749) 2024-12-12T16:27:43,167 WARN 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:43,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020923165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:43,170 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:43,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55198 deadline: 1734020923169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:43,170 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:43,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020923169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:43,171 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:43,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020923170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:43,171 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:43,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020923170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:43,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-12T16:27:43,344 INFO [Thread-694 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 51 completed 2024-12-12T16:27:43,345 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T16:27:43,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees 2024-12-12T16:27:43,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-12T16:27:43,347 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T16:27:43,348 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T16:27:43,348 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T16:27:43,408 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=238, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/0bc11d64afbb49c290ad22a73c8519ef 2024-12-12T16:27:43,416 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/3d766a3f93f348baa8108191186364a0 is 50, key is test_row_0/B:col10/1734020862547/Put/seqid=0 2024-12-12T16:27:43,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742027_1203 (size=12151) 
2024-12-12T16:27:43,421 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/3d766a3f93f348baa8108191186364a0 2024-12-12T16:27:43,430 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/dee4597c3a7b4b55b3772a6f04cfc246 is 50, key is test_row_0/C:col10/1734020862547/Put/seqid=0 2024-12-12T16:27:43,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742028_1204 (size=12151) 2024-12-12T16:27:43,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-12T16:27:43,499 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:43,500 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-12T16:27:43,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:43,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. as already flushing 2024-12-12T16:27:43,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:43,500 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:27:43,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:43,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:43,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-12T16:27:43,652 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:43,653 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-12T16:27:43,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:43,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. as already flushing 2024-12-12T16:27:43,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:43,654 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:43,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:43,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:43,672 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:43,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020923670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:43,674 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:43,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020923672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:43,675 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:43,675 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:43,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55198 deadline: 1734020923675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:43,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020923675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:43,676 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:43,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020923676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:43,806 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:43,806 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-12T16:27:43,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:43,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. as already flushing 2024-12-12T16:27:43,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:43,807 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:27:43,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:43,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:43,835 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/dee4597c3a7b4b55b3772a6f04cfc246 2024-12-12T16:27:43,841 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/0bc11d64afbb49c290ad22a73c8519ef as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/0bc11d64afbb49c290ad22a73c8519ef 2024-12-12T16:27:43,845 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/0bc11d64afbb49c290ad22a73c8519ef, entries=200, sequenceid=238, filesize=38.8 K 2024-12-12T16:27:43,846 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/3d766a3f93f348baa8108191186364a0 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/3d766a3f93f348baa8108191186364a0 2024-12-12T16:27:43,851 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/3d766a3f93f348baa8108191186364a0, entries=150, sequenceid=238, filesize=11.9 K 2024-12-12T16:27:43,853 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/dee4597c3a7b4b55b3772a6f04cfc246 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/dee4597c3a7b4b55b3772a6f04cfc246 2024-12-12T16:27:43,858 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/dee4597c3a7b4b55b3772a6f04cfc246, entries=150, sequenceid=238, filesize=11.9 K 2024-12-12T16:27:43,859 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=46.96 KB/48090 for 044396069fa7748e35e43f97f084a6ce in 1311ms, sequenceid=238, compaction requested=true 2024-12-12T16:27:43,859 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 044396069fa7748e35e43f97f084a6ce: 2024-12-12T16:27:43,859 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
044396069fa7748e35e43f97f084a6ce:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T16:27:43,859 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T16:27:43,859 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:43,859 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T16:27:43,860 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 044396069fa7748e35e43f97f084a6ce:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T16:27:43,860 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:43,860 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 044396069fa7748e35e43f97f084a6ce:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T16:27:43,860 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:27:43,861 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T16:27:43,861 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 142118 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T16:27:43,861 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): 044396069fa7748e35e43f97f084a6ce/B is initiating minor compaction (all files) 2024-12-12T16:27:43,861 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): 044396069fa7748e35e43f97f084a6ce/A is initiating minor compaction (all files) 2024-12-12T16:27:43,861 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 044396069fa7748e35e43f97f084a6ce/B in TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:43,861 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 044396069fa7748e35e43f97f084a6ce/A in TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 
2024-12-12T16:27:43,861 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/8de13515a3024316b977b94c1db5951b, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/0fbd022afd984d07a33a88d83d92ac5c, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/5779e1f2410c4df69730fc01f976b2a4, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/3d766a3f93f348baa8108191186364a0] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp, totalSize=47.9 K 2024-12-12T16:27:43,861 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/70717b8ab4d147449c2aa55c309e48d3, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/864dd3d65a6d4bb1b45ab0981eac8a3b, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/b1db9123062b4e0c88754190035c7c0c, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/0bc11d64afbb49c290ad22a73c8519ef] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp, totalSize=138.8 K 2024-12-12T16:27:43,861 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:43,862 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 
files: [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/70717b8ab4d147449c2aa55c309e48d3, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/864dd3d65a6d4bb1b45ab0981eac8a3b, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/b1db9123062b4e0c88754190035c7c0c, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/0bc11d64afbb49c290ad22a73c8519ef] 2024-12-12T16:27:43,862 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 8de13515a3024316b977b94c1db5951b, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1734020858073 2024-12-12T16:27:43,863 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 0fbd022afd984d07a33a88d83d92ac5c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1734020858150 2024-12-12T16:27:43,863 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 70717b8ab4d147449c2aa55c309e48d3, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1734020858073 2024-12-12T16:27:43,863 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 5779e1f2410c4df69730fc01f976b2a4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1734020859274 2024-12-12T16:27:43,863 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 864dd3d65a6d4bb1b45ab0981eac8a3b, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1734020858150 2024-12-12T16:27:43,864 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 3d766a3f93f348baa8108191186364a0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1734020860421 2024-12-12T16:27:43,864 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting b1db9123062b4e0c88754190035c7c0c, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1734020859274 2024-12-12T16:27:43,864 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0bc11d64afbb49c290ad22a73c8519ef, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1734020860418 2024-12-12T16:27:43,873 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=044396069fa7748e35e43f97f084a6ce] 2024-12-12T16:27:43,875 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 044396069fa7748e35e43f97f084a6ce#B#compaction#171 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:27:43,875 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/e99d405c0eb142bfa9ee5474ee4addee is 50, key is test_row_0/B:col10/1734020862547/Put/seqid=0 2024-12-12T16:27:43,876 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241212bcec6844c1324fef8e6297a56425198e_044396069fa7748e35e43f97f084a6ce store=[table=TestAcidGuarantees family=A region=044396069fa7748e35e43f97f084a6ce] 2024-12-12T16:27:43,878 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241212bcec6844c1324fef8e6297a56425198e_044396069fa7748e35e43f97f084a6ce, store=[table=TestAcidGuarantees family=A region=044396069fa7748e35e43f97f084a6ce] 2024-12-12T16:27:43,878 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212bcec6844c1324fef8e6297a56425198e_044396069fa7748e35e43f97f084a6ce because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=044396069fa7748e35e43f97f084a6ce] 2024-12-12T16:27:43,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742030_1206 (size=4469) 2024-12-12T16:27:43,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742029_1205 (size=12697) 2024-12-12T16:27:43,904 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/e99d405c0eb142bfa9ee5474ee4addee as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/e99d405c0eb142bfa9ee5474ee4addee 2024-12-12T16:27:43,910 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 044396069fa7748e35e43f97f084a6ce/B of 044396069fa7748e35e43f97f084a6ce into e99d405c0eb142bfa9ee5474ee4addee(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T16:27:43,910 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 044396069fa7748e35e43f97f084a6ce: 2024-12-12T16:27:43,910 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce., storeName=044396069fa7748e35e43f97f084a6ce/B, priority=12, startTime=1734020863859; duration=0sec 2024-12-12T16:27:43,910 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:27:43,910 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 044396069fa7748e35e43f97f084a6ce:B 2024-12-12T16:27:43,910 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T16:27:43,912 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T16:27:43,912 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): 044396069fa7748e35e43f97f084a6ce/C is initiating minor compaction (all files) 2024-12-12T16:27:43,912 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 044396069fa7748e35e43f97f084a6ce/C in TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:43,912 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/009dbe2e3a75470c8f9753ae1f439152, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/daf53761b6d2405e921bdd58dd84f6fa, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/89cbc37040bb4656aef218a03c0c8258, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/dee4597c3a7b4b55b3772a6f04cfc246] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp, totalSize=47.9 K 2024-12-12T16:27:43,913 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 009dbe2e3a75470c8f9753ae1f439152, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1734020858073 2024-12-12T16:27:43,913 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting daf53761b6d2405e921bdd58dd84f6fa, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1734020858150 2024-12-12T16:27:43,914 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 89cbc37040bb4656aef218a03c0c8258, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=210, earliestPutTs=1734020859274 2024-12-12T16:27:43,914 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting dee4597c3a7b4b55b3772a6f04cfc246, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1734020860421 2024-12-12T16:27:43,924 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 044396069fa7748e35e43f97f084a6ce#C#compaction#173 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:27:43,925 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/dc3c23d96d5b44d996cc47964c80b79b is 50, key is test_row_0/C:col10/1734020862547/Put/seqid=0 2024-12-12T16:27:43,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742031_1207 (size=12697) 2024-12-12T16:27:43,935 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/dc3c23d96d5b44d996cc47964c80b79b as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/dc3c23d96d5b44d996cc47964c80b79b 2024-12-12T16:27:43,939 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 044396069fa7748e35e43f97f084a6ce/C of 044396069fa7748e35e43f97f084a6ce into dc3c23d96d5b44d996cc47964c80b79b(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T16:27:43,940 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 044396069fa7748e35e43f97f084a6ce: 2024-12-12T16:27:43,940 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce., storeName=044396069fa7748e35e43f97f084a6ce/C, priority=12, startTime=1734020863860; duration=0sec 2024-12-12T16:27:43,940 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:43,940 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 044396069fa7748e35e43f97f084a6ce:C 2024-12-12T16:27:43,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-12T16:27:43,959 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:43,959 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-12T16:27:43,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:43,959 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2837): Flushing 044396069fa7748e35e43f97f084a6ce 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-12T16:27:43,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=A 2024-12-12T16:27:43,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:43,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=B 2024-12-12T16:27:43,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:43,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=C 2024-12-12T16:27:43,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:43,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412128443405ca4a14a98951cc1a185c51cee_044396069fa7748e35e43f97f084a6ce is 50, key is test_row_0/A:col10/1734020862556/Put/seqid=0 2024-12-12T16:27:43,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742032_1208 (size=12304) 2024-12-12T16:27:43,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:43,978 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412128443405ca4a14a98951cc1a185c51cee_044396069fa7748e35e43f97f084a6ce to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412128443405ca4a14a98951cc1a185c51cee_044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:43,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/2ad5134368f1465380b45b11ef4baf95, store: [table=TestAcidGuarantees family=A region=044396069fa7748e35e43f97f084a6ce] 2024-12-12T16:27:43,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/2ad5134368f1465380b45b11ef4baf95 is 175, key is test_row_0/A:col10/1734020862556/Put/seqid=0 2024-12-12T16:27:43,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742033_1209 (size=31105) 2024-12-12T16:27:44,292 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 044396069fa7748e35e43f97f084a6ce#A#compaction#172 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:27:44,292 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/7ed867db702e42c4be5d2a2911ec4c11 is 175, key is test_row_0/A:col10/1734020862547/Put/seqid=0 2024-12-12T16:27:44,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742034_1210 (size=31651) 2024-12-12T16:27:44,393 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=250, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/2ad5134368f1465380b45b11ef4baf95 2024-12-12T16:27:44,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/300e0f71b00b415d964ea95d449c3a8f is 50, key is test_row_0/B:col10/1734020862556/Put/seqid=0 2024-12-12T16:27:44,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742035_1211 (size=12151) 2024-12-12T16:27:44,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-12T16:27:44,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:44,682 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. as already flushing 2024-12-12T16:27:44,704 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/7ed867db702e42c4be5d2a2911ec4c11 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/7ed867db702e42c4be5d2a2911ec4c11 2024-12-12T16:27:44,707 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:44,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020924703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:44,707 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:44,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020924704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:44,707 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:44,707 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:44,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020924705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:44,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020924705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:44,708 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:44,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55198 deadline: 1734020924707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:44,711 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 044396069fa7748e35e43f97f084a6ce/A of 044396069fa7748e35e43f97f084a6ce into 7ed867db702e42c4be5d2a2911ec4c11(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:27:44,711 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 044396069fa7748e35e43f97f084a6ce: 2024-12-12T16:27:44,711 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce., storeName=044396069fa7748e35e43f97f084a6ce/A, priority=12, startTime=1734020863859; duration=0sec 2024-12-12T16:27:44,711 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:44,711 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 044396069fa7748e35e43f97f084a6ce:A 2024-12-12T16:27:44,809 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:44,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020924808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:44,810 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:44,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020924808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:44,810 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:44,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55198 deadline: 1734020924809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:44,810 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:44,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020924809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:44,810 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:44,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020924810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:44,816 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/300e0f71b00b415d964ea95d449c3a8f 2024-12-12T16:27:44,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/12b80e2ede894b6f80e6cd57845dff3b is 50, key is test_row_0/C:col10/1734020862556/Put/seqid=0 2024-12-12T16:27:44,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742036_1212 (size=12151) 2024-12-12T16:27:45,012 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:45,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020925011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:45,014 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:45,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020925012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:45,014 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:45,015 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:45,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020925012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:45,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55198 deadline: 1734020925012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:45,015 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:45,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020925013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:45,249 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/12b80e2ede894b6f80e6cd57845dff3b 2024-12-12T16:27:45,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/2ad5134368f1465380b45b11ef4baf95 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/2ad5134368f1465380b45b11ef4baf95 2024-12-12T16:27:45,259 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/2ad5134368f1465380b45b11ef4baf95, entries=150, sequenceid=250, filesize=30.4 K 2024-12-12T16:27:45,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/300e0f71b00b415d964ea95d449c3a8f as 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/300e0f71b00b415d964ea95d449c3a8f 2024-12-12T16:27:45,265 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/300e0f71b00b415d964ea95d449c3a8f, entries=150, sequenceid=250, filesize=11.9 K 2024-12-12T16:27:45,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/12b80e2ede894b6f80e6cd57845dff3b as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/12b80e2ede894b6f80e6cd57845dff3b 2024-12-12T16:27:45,271 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/12b80e2ede894b6f80e6cd57845dff3b, entries=150, sequenceid=250, filesize=11.9 K 2024-12-12T16:27:45,272 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 044396069fa7748e35e43f97f084a6ce in 1313ms, sequenceid=250, compaction requested=false 2024-12-12T16:27:45,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2538): Flush status journal for 044396069fa7748e35e43f97f084a6ce: 2024-12-12T16:27:45,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 
2024-12-12T16:27:45,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-12-12T16:27:45,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4106): Remote procedure done, pid=54 2024-12-12T16:27:45,275 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=53 2024-12-12T16:27:45,275 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9250 sec 2024-12-12T16:27:45,277 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees in 1.9310 sec 2024-12-12T16:27:45,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:45,315 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 044396069fa7748e35e43f97f084a6ce 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-12T16:27:45,316 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=A 2024-12-12T16:27:45,316 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:45,316 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=B 2024-12-12T16:27:45,316 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:45,316 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=C 2024-12-12T16:27:45,316 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:45,323 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:45,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55198 deadline: 1734020925320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:45,324 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:45,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020925321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:45,324 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:45,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020925321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:45,327 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:45,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020925323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:45,327 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:45,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020925324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:45,334 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212bc71a22ab462452a8eec2b642e66d2e1_044396069fa7748e35e43f97f084a6ce is 50, key is test_row_0/A:col10/1734020864705/Put/seqid=0 2024-12-12T16:27:45,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742037_1213 (size=14994) 2024-12-12T16:27:45,351 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:45,355 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212bc71a22ab462452a8eec2b642e66d2e1_044396069fa7748e35e43f97f084a6ce to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212bc71a22ab462452a8eec2b642e66d2e1_044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:45,356 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/f9a89ba336734d71ad997dab2ed83c34, store: [table=TestAcidGuarantees family=A region=044396069fa7748e35e43f97f084a6ce] 2024-12-12T16:27:45,357 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/f9a89ba336734d71ad997dab2ed83c34 is 175, key is test_row_0/A:col10/1734020864705/Put/seqid=0 2024-12-12T16:27:45,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742038_1214 (size=39949) 2024-12-12T16:27:45,377 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=278, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/f9a89ba336734d71ad997dab2ed83c34 2024-12-12T16:27:45,387 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/427ff99de3004b11bd59cdaeed911606 is 50, key is test_row_0/B:col10/1734020864705/Put/seqid=0 2024-12-12T16:27:45,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742039_1215 (size=12301) 2024-12-12T16:27:45,399 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/427ff99de3004b11bd59cdaeed911606 2024-12-12T16:27:45,410 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/7ceb6436c37e4cf29133a012e28235b0 is 50, key is test_row_0/C:col10/1734020864705/Put/seqid=0 2024-12-12T16:27:45,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742040_1216 (size=12301) 2024-12-12T16:27:45,424 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/7ceb6436c37e4cf29133a012e28235b0 2024-12-12T16:27:45,424 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:45,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55198 deadline: 1734020925424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:45,425 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:45,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020925425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:45,429 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:45,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020925428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:45,430 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/f9a89ba336734d71ad997dab2ed83c34 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/f9a89ba336734d71ad997dab2ed83c34 2024-12-12T16:27:45,430 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:45,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020925428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:45,430 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:45,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020925429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:45,434 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/f9a89ba336734d71ad997dab2ed83c34, entries=200, sequenceid=278, filesize=39.0 K 2024-12-12T16:27:45,435 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/427ff99de3004b11bd59cdaeed911606 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/427ff99de3004b11bd59cdaeed911606 2024-12-12T16:27:45,441 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/427ff99de3004b11bd59cdaeed911606, entries=150, sequenceid=278, filesize=12.0 K 2024-12-12T16:27:45,442 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/7ceb6436c37e4cf29133a012e28235b0 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/7ceb6436c37e4cf29133a012e28235b0 2024-12-12T16:27:45,451 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/7ceb6436c37e4cf29133a012e28235b0, entries=150, sequenceid=278, filesize=12.0 K 2024-12-12T16:27:45,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-12T16:27:45,451 INFO [Thread-694 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 53 completed 2024-12-12T16:27:45,452 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=46.96 KB/48090 for 044396069fa7748e35e43f97f084a6ce in 137ms, sequenceid=278, compaction requested=true 2024-12-12T16:27:45,452 DEBUG [MemStoreFlusher.0 
{}] regionserver.HRegion(2538): Flush status journal for 044396069fa7748e35e43f97f084a6ce: 2024-12-12T16:27:45,452 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:27:45,452 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 044396069fa7748e35e43f97f084a6ce:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T16:27:45,452 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:45,452 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 044396069fa7748e35e43f97f084a6ce:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T16:27:45,452 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:45,453 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 044396069fa7748e35e43f97f084a6ce:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T16:27:45,453 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:27:45,453 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:27:45,453 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T16:27:45,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees 2024-12-12T16:27:45,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-12T16:27:45,455 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T16:27:45,456 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T16:27:45,456 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=56, ppid=55, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T16:27:45,459 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102705 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:27:45,459 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has 
selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:27:45,459 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): 044396069fa7748e35e43f97f084a6ce/A is initiating minor compaction (all files) 2024-12-12T16:27:45,459 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): 044396069fa7748e35e43f97f084a6ce/B is initiating minor compaction (all files) 2024-12-12T16:27:45,459 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 044396069fa7748e35e43f97f084a6ce/A in TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:45,459 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/7ed867db702e42c4be5d2a2911ec4c11, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/2ad5134368f1465380b45b11ef4baf95, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/f9a89ba336734d71ad997dab2ed83c34] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp, totalSize=100.3 K 2024-12-12T16:27:45,460 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:45,460 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. files: [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/7ed867db702e42c4be5d2a2911ec4c11, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/2ad5134368f1465380b45b11ef4baf95, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/f9a89ba336734d71ad997dab2ed83c34] 2024-12-12T16:27:45,460 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 044396069fa7748e35e43f97f084a6ce/B in TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 
2024-12-12T16:27:45,460 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/e99d405c0eb142bfa9ee5474ee4addee, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/300e0f71b00b415d964ea95d449c3a8f, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/427ff99de3004b11bd59cdaeed911606] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp, totalSize=36.3 K 2024-12-12T16:27:45,460 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7ed867db702e42c4be5d2a2911ec4c11, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1734020860421 2024-12-12T16:27:45,461 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting e99d405c0eb142bfa9ee5474ee4addee, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1734020860421 2024-12-12T16:27:45,461 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2ad5134368f1465380b45b11ef4baf95, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1734020862550 2024-12-12T16:27:45,462 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 300e0f71b00b415d964ea95d449c3a8f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1734020862550 2024-12-12T16:27:45,462 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting f9a89ba336734d71ad997dab2ed83c34, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1734020864701 2024-12-12T16:27:45,462 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 427ff99de3004b11bd59cdaeed911606, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1734020864701 2024-12-12T16:27:45,471 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=044396069fa7748e35e43f97f084a6ce] 2024-12-12T16:27:45,486 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 044396069fa7748e35e43f97f084a6ce#B#compaction#181 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:27:45,487 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/ddc546d25dfe4f59b5125548cbcbc5cd is 50, key is test_row_0/B:col10/1734020864705/Put/seqid=0 2024-12-12T16:27:45,489 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412128c4b99d3c1ee4d3b9b08a12a6c4f13aa_044396069fa7748e35e43f97f084a6ce store=[table=TestAcidGuarantees family=A region=044396069fa7748e35e43f97f084a6ce] 2024-12-12T16:27:45,491 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412128c4b99d3c1ee4d3b9b08a12a6c4f13aa_044396069fa7748e35e43f97f084a6ce, store=[table=TestAcidGuarantees family=A region=044396069fa7748e35e43f97f084a6ce] 2024-12-12T16:27:45,491 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412128c4b99d3c1ee4d3b9b08a12a6c4f13aa_044396069fa7748e35e43f97f084a6ce because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=044396069fa7748e35e43f97f084a6ce] 2024-12-12T16:27:45,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742042_1218 (size=4469) 2024-12-12T16:27:45,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742041_1217 (size=12949) 2024-12-12T16:27:45,517 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 044396069fa7748e35e43f97f084a6ce#A#compaction#180 average throughput is 0.53 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:27:45,518 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/ccdfd3ac9eec44e0a8a8500a51c92dc8 is 175, key is test_row_0/A:col10/1734020864705/Put/seqid=0 2024-12-12T16:27:45,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742043_1219 (size=31903) 2024-12-12T16:27:45,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-12T16:27:45,608 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:45,609 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-12T16:27:45,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:45,609 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2837): Flushing 044396069fa7748e35e43f97f084a6ce 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-12T16:27:45,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=A 2024-12-12T16:27:45,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:45,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=B 2024-12-12T16:27:45,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:45,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=C 2024-12-12T16:27:45,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:45,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212181ef70cbf64411687ca392f5253d3ea_044396069fa7748e35e43f97f084a6ce is 50, key is test_row_0/A:col10/1734020865322/Put/seqid=0 2024-12-12T16:27:45,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 
044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:45,628 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. as already flushing 2024-12-12T16:27:45,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742044_1220 (size=12454) 2024-12-12T16:27:45,659 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:45,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020925654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:45,659 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:45,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020925655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:45,660 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:45,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020925655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:45,660 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:45,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55198 deadline: 1734020925656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:45,660 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:45,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020925656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:45,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-12T16:27:45,762 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:45,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020925760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:45,762 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:45,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020925761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:45,763 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:45,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020925761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:45,763 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:45,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55198 deadline: 1734020925761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:45,763 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:45,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020925762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:45,924 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/ddc546d25dfe4f59b5125548cbcbc5cd as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/ddc546d25dfe4f59b5125548cbcbc5cd 2024-12-12T16:27:45,929 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/ccdfd3ac9eec44e0a8a8500a51c92dc8 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/ccdfd3ac9eec44e0a8a8500a51c92dc8 2024-12-12T16:27:45,930 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 044396069fa7748e35e43f97f084a6ce/B of 044396069fa7748e35e43f97f084a6ce into ddc546d25dfe4f59b5125548cbcbc5cd(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T16:27:45,930 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 044396069fa7748e35e43f97f084a6ce: 2024-12-12T16:27:45,930 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce., storeName=044396069fa7748e35e43f97f084a6ce/B, priority=13, startTime=1734020865452; duration=0sec 2024-12-12T16:27:45,930 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:27:45,930 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 044396069fa7748e35e43f97f084a6ce:B 2024-12-12T16:27:45,930 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:27:45,932 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:27:45,932 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): 044396069fa7748e35e43f97f084a6ce/C is initiating minor compaction (all files) 2024-12-12T16:27:45,932 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 044396069fa7748e35e43f97f084a6ce/C in TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:45,932 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/dc3c23d96d5b44d996cc47964c80b79b, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/12b80e2ede894b6f80e6cd57845dff3b, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/7ceb6436c37e4cf29133a012e28235b0] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp, totalSize=36.3 K 2024-12-12T16:27:45,933 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting dc3c23d96d5b44d996cc47964c80b79b, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1734020860421 2024-12-12T16:27:45,934 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 12b80e2ede894b6f80e6cd57845dff3b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1734020862550 2024-12-12T16:27:45,935 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 044396069fa7748e35e43f97f084a6ce/A of 044396069fa7748e35e43f97f084a6ce into ccdfd3ac9eec44e0a8a8500a51c92dc8(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T16:27:45,935 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 044396069fa7748e35e43f97f084a6ce: 2024-12-12T16:27:45,935 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce., storeName=044396069fa7748e35e43f97f084a6ce/A, priority=13, startTime=1734020865452; duration=0sec 2024-12-12T16:27:45,935 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:45,935 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 044396069fa7748e35e43f97f084a6ce:A 2024-12-12T16:27:45,936 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 7ceb6436c37e4cf29133a012e28235b0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1734020864701 2024-12-12T16:27:45,944 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 044396069fa7748e35e43f97f084a6ce#C#compaction#183 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:27:45,945 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/a42a67ba13f1484480bab737ef1e1f7d is 50, key is test_row_0/C:col10/1734020864705/Put/seqid=0 2024-12-12T16:27:45,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742045_1221 (size=12949) 2024-12-12T16:27:45,966 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:45,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020925964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:45,967 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:45,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020925964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:45,967 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:45,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55198 deadline: 1734020925964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:45,968 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:45,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020925964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:45,968 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:45,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020925965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:46,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:46,050 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212181ef70cbf64411687ca392f5253d3ea_044396069fa7748e35e43f97f084a6ce to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212181ef70cbf64411687ca392f5253d3ea_044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:46,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/22666d0a8e594f68850e9ca752ff9edb, store: [table=TestAcidGuarantees family=A region=044396069fa7748e35e43f97f084a6ce] 2024-12-12T16:27:46,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/22666d0a8e594f68850e9ca752ff9edb is 175, key is test_row_0/A:col10/1734020865322/Put/seqid=0 2024-12-12T16:27:46,056 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-12T16:27:46,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742046_1222 (size=31255) 2024-12-12T16:27:46,064 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=288, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/22666d0a8e594f68850e9ca752ff9edb 2024-12-12T16:27:46,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/6867052bcc07466082e09bb6be969275 is 50, key is test_row_0/B:col10/1734020865322/Put/seqid=0 2024-12-12T16:27:46,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742047_1223 (size=12301) 2024-12-12T16:27:46,083 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/6867052bcc07466082e09bb6be969275 2024-12-12T16:27:46,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/5b444dbf4a624635b871cdace4993440 is 50, key is test_row_0/C:col10/1734020865322/Put/seqid=0 2024-12-12T16:27:46,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742048_1224 (size=12301) 2024-12-12T16:27:46,270 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:46,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020926268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:46,271 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:46,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020926269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:46,271 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:46,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020926269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:46,274 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:46,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55198 deadline: 1734020926271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:46,275 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:46,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020926271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:46,362 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/a42a67ba13f1484480bab737ef1e1f7d as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/a42a67ba13f1484480bab737ef1e1f7d 2024-12-12T16:27:46,369 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 044396069fa7748e35e43f97f084a6ce/C of 044396069fa7748e35e43f97f084a6ce into a42a67ba13f1484480bab737ef1e1f7d(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T16:27:46,369 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 044396069fa7748e35e43f97f084a6ce: 2024-12-12T16:27:46,369 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce., storeName=044396069fa7748e35e43f97f084a6ce/C, priority=13, startTime=1734020865452; duration=0sec 2024-12-12T16:27:46,369 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:46,369 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 044396069fa7748e35e43f97f084a6ce:C 2024-12-12T16:27:46,521 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/5b444dbf4a624635b871cdace4993440 2024-12-12T16:27:46,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/22666d0a8e594f68850e9ca752ff9edb as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/22666d0a8e594f68850e9ca752ff9edb 2024-12-12T16:27:46,535 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/22666d0a8e594f68850e9ca752ff9edb, entries=150, sequenceid=288, filesize=30.5 K 2024-12-12T16:27:46,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/6867052bcc07466082e09bb6be969275 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/6867052bcc07466082e09bb6be969275 2024-12-12T16:27:46,542 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/6867052bcc07466082e09bb6be969275, entries=150, sequenceid=288, filesize=12.0 K 2024-12-12T16:27:46,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/5b444dbf4a624635b871cdace4993440 as 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/5b444dbf4a624635b871cdace4993440 2024-12-12T16:27:46,550 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/5b444dbf4a624635b871cdace4993440, entries=150, sequenceid=288, filesize=12.0 K 2024-12-12T16:27:46,551 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=161.02 KB/164880 for 044396069fa7748e35e43f97f084a6ce in 942ms, sequenceid=288, compaction requested=false 2024-12-12T16:27:46,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2538): Flush status journal for 044396069fa7748e35e43f97f084a6ce: 2024-12-12T16:27:46,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:46,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=56 2024-12-12T16:27:46,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4106): Remote procedure done, pid=56 2024-12-12T16:27:46,553 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=56, resume processing ppid=55 2024-12-12T16:27:46,553 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, ppid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0960 sec 2024-12-12T16:27:46,555 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees in 1.1010 sec 2024-12-12T16:27:46,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-12T16:27:46,558 INFO [Thread-694 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 55 completed 2024-12-12T16:27:46,559 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T16:27:46,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees 2024-12-12T16:27:46,560 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T16:27:46,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-12T16:27:46,560 INFO [PEWorker-5 {}] 
procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T16:27:46,560 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T16:27:46,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-12T16:27:46,712 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:46,712 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-12T16:27:46,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:46,713 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2837): Flushing 044396069fa7748e35e43f97f084a6ce 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-12T16:27:46,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=A 2024-12-12T16:27:46,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:46,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=B 2024-12-12T16:27:46,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:46,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=C 2024-12-12T16:27:46,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:46,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412126eeaaeee7584443e8326ea9ce06c8979_044396069fa7748e35e43f97f084a6ce is 50, key is test_row_0/A:col10/1734020865655/Put/seqid=0 2024-12-12T16:27:46,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742049_1225 (size=12454) 2024-12-12T16:27:46,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 044396069fa7748e35e43f97f084a6ce 
2024-12-12T16:27:46,774 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. as already flushing 2024-12-12T16:27:46,779 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:46,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020926776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:46,779 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:46,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020926776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:46,781 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:46,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020926779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:46,782 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:46,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020926779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:46,782 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:46,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55198 deadline: 1734020926779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:46,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-12T16:27:46,882 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:46,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020926880, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:46,882 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:46,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020926880, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:46,884 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:46,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020926883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:46,884 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:46,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020926883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:47,084 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:47,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020927083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:47,086 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:47,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020927084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:47,088 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:47,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020927086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:47,088 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:47,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020927086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:47,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:47,146 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412126eeaaeee7584443e8326ea9ce06c8979_044396069fa7748e35e43f97f084a6ce to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412126eeaaeee7584443e8326ea9ce06c8979_044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:47,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/abe581f24653471282017e024b4ae7bc, store: [table=TestAcidGuarantees family=A region=044396069fa7748e35e43f97f084a6ce] 2024-12-12T16:27:47,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/abe581f24653471282017e024b4ae7bc is 175, key is test_row_0/A:col10/1734020865655/Put/seqid=0 2024-12-12T16:27:47,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742050_1226 (size=31255) 2024-12-12T16:27:47,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-12T16:27:47,388 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:47,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020927386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:47,389 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:47,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020927387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:47,389 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:47,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020927389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:47,394 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:47,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020927391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:47,558 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=318, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/abe581f24653471282017e024b4ae7bc 2024-12-12T16:27:47,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/c7319437e81a48c29fb409a23f93ee44 is 50, key is test_row_0/B:col10/1734020865655/Put/seqid=0 2024-12-12T16:27:47,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742051_1227 (size=12301) 2024-12-12T16:27:47,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-12T16:27:47,785 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:47,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55198 deadline: 1734020927783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:47,808 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-12T16:27:47,891 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:47,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020927890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:47,896 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:47,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020927893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:47,897 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:47,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020927893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:47,899 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:47,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020927896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:47,974 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/c7319437e81a48c29fb409a23f93ee44 2024-12-12T16:27:47,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/2f44a877c8e44fd3a5e2d662eb8c145b is 50, key is test_row_0/C:col10/1734020865655/Put/seqid=0 2024-12-12T16:27:47,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742052_1228 (size=12301) 2024-12-12T16:27:47,989 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/2f44a877c8e44fd3a5e2d662eb8c145b 2024-12-12T16:27:47,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/abe581f24653471282017e024b4ae7bc as 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/abe581f24653471282017e024b4ae7bc 2024-12-12T16:27:47,998 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/abe581f24653471282017e024b4ae7bc, entries=150, sequenceid=318, filesize=30.5 K 2024-12-12T16:27:47,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/c7319437e81a48c29fb409a23f93ee44 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/c7319437e81a48c29fb409a23f93ee44 2024-12-12T16:27:48,003 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/c7319437e81a48c29fb409a23f93ee44, entries=150, sequenceid=318, filesize=12.0 K 2024-12-12T16:27:48,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/2f44a877c8e44fd3a5e2d662eb8c145b as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/2f44a877c8e44fd3a5e2d662eb8c145b 2024-12-12T16:27:48,008 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/2f44a877c8e44fd3a5e2d662eb8c145b, entries=150, sequenceid=318, filesize=12.0 K 2024-12-12T16:27:48,011 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 044396069fa7748e35e43f97f084a6ce in 1299ms, sequenceid=318, compaction requested=true 2024-12-12T16:27:48,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2538): Flush status journal for 044396069fa7748e35e43f97f084a6ce: 2024-12-12T16:27:48,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 
2024-12-12T16:27:48,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=58 2024-12-12T16:27:48,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4106): Remote procedure done, pid=58 2024-12-12T16:27:48,014 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=58, resume processing ppid=57 2024-12-12T16:27:48,014 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4530 sec 2024-12-12T16:27:48,016 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees in 1.4560 sec 2024-12-12T16:27:48,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-12T16:27:48,664 INFO [Thread-694 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 57 completed 2024-12-12T16:27:48,665 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T16:27:48,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees 2024-12-12T16:27:48,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-12T16:27:48,667 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T16:27:48,667 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T16:27:48,667 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T16:27:48,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-12T16:27:48,819 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:48,820 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-12T16:27:48,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 
2024-12-12T16:27:48,820 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2837): Flushing 044396069fa7748e35e43f97f084a6ce 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-12-12T16:27:48,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=A 2024-12-12T16:27:48,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:48,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=B 2024-12-12T16:27:48,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:48,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=C 2024-12-12T16:27:48,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:48,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212ed085d60931442efa877ad1b3c990bfa_044396069fa7748e35e43f97f084a6ce is 50, key is test_row_0/A:col10/1734020866778/Put/seqid=0 2024-12-12T16:27:48,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742053_1229 (size=12454) 2024-12-12T16:27:48,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:48,841 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212ed085d60931442efa877ad1b3c990bfa_044396069fa7748e35e43f97f084a6ce to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212ed085d60931442efa877ad1b3c990bfa_044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:48,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/f05fc865c001485fb11cf3312e6df469, store: [table=TestAcidGuarantees family=A region=044396069fa7748e35e43f97f084a6ce] 2024-12-12T16:27:48,843 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/f05fc865c001485fb11cf3312e6df469 is 175, key is test_row_0/A:col10/1734020866778/Put/seqid=0 2024-12-12T16:27:48,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742054_1230 (size=31255) 2024-12-12T16:27:48,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:48,900 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. as already flushing 2024-12-12T16:27:48,929 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:48,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020928925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:48,929 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:48,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020928926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:48,930 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:48,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020928927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:48,930 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:48,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020928928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:48,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-12T16:27:49,033 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:49,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020929031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:49,033 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:49,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020929031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:49,033 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:49,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020929031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:49,034 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:49,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020929031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:49,235 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:49,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020929234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:49,236 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:49,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020929234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:49,236 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:49,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020929234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:49,236 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:49,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020929235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:49,249 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=327, memsize=13.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/f05fc865c001485fb11cf3312e6df469 2024-12-12T16:27:49,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/29a7f1464e014f4783d2464929e638ae is 50, key is test_row_0/B:col10/1734020866778/Put/seqid=0 2024-12-12T16:27:49,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742055_1231 (size=12301) 2024-12-12T16:27:49,264 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=327 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/29a7f1464e014f4783d2464929e638ae 2024-12-12T16:27:49,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-12T16:27:49,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/ba425df7131443509c877164f8d40ae6 is 50, key is test_row_0/C:col10/1734020866778/Put/seqid=0 2024-12-12T16:27:49,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to 
blk_1073742056_1232 (size=12301) 2024-12-12T16:27:49,291 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=327 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/ba425df7131443509c877164f8d40ae6 2024-12-12T16:27:49,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/f05fc865c001485fb11cf3312e6df469 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/f05fc865c001485fb11cf3312e6df469 2024-12-12T16:27:49,300 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/f05fc865c001485fb11cf3312e6df469, entries=150, sequenceid=327, filesize=30.5 K 2024-12-12T16:27:49,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/29a7f1464e014f4783d2464929e638ae as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/29a7f1464e014f4783d2464929e638ae 2024-12-12T16:27:49,307 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/29a7f1464e014f4783d2464929e638ae, entries=150, sequenceid=327, filesize=12.0 K 2024-12-12T16:27:49,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/ba425df7131443509c877164f8d40ae6 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/ba425df7131443509c877164f8d40ae6 2024-12-12T16:27:49,313 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/ba425df7131443509c877164f8d40ae6, entries=150, sequenceid=327, filesize=12.0 K 2024-12-12T16:27:49,314 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=161.02 KB/164880 for 
044396069fa7748e35e43f97f084a6ce in 494ms, sequenceid=327, compaction requested=true 2024-12-12T16:27:49,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2538): Flush status journal for 044396069fa7748e35e43f97f084a6ce: 2024-12-12T16:27:49,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:49,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=60 2024-12-12T16:27:49,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4106): Remote procedure done, pid=60 2024-12-12T16:27:49,317 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=60, resume processing ppid=59 2024-12-12T16:27:49,317 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, ppid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 649 msec 2024-12-12T16:27:49,319 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees in 653 msec 2024-12-12T16:27:49,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:49,538 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 044396069fa7748e35e43f97f084a6ce 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-12-12T16:27:49,539 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=A 2024-12-12T16:27:49,539 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:49,539 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=B 2024-12-12T16:27:49,539 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:49,539 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=C 2024-12-12T16:27:49,539 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:49,548 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212953f773adf364f268d5cc614d1024131_044396069fa7748e35e43f97f084a6ce is 50, key is test_row_0/A:col10/1734020868925/Put/seqid=0 2024-12-12T16:27:49,548 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:49,548 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:49,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020929543, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:49,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020929546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:49,553 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:49,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020929549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:49,554 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:49,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020929549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:49,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742057_1233 (size=14994) 2024-12-12T16:27:49,650 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:49,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020929650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:49,651 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:49,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020929650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:49,655 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:49,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020929655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:49,655 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:49,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020929655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:49,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-12T16:27:49,770 INFO [Thread-694 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 59 completed 2024-12-12T16:27:49,771 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T16:27:49,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=61, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees 2024-12-12T16:27:49,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-12T16:27:49,773 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=61, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T16:27:49,774 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=61, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T16:27:49,774 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=62, ppid=61, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T16:27:49,802 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to 
exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:49,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55198 deadline: 1734020929801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:49,803 DEBUG [Thread-692 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4147 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce., hostname=4f6a4780a2f6,41933,1734020809476, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T16:27:49,853 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:49,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020929852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:49,853 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:49,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020929853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:49,857 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:49,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020929856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:49,859 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:49,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020929857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:49,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-12T16:27:49,925 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:49,926 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-12T16:27:49,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:49,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. as already flushing 2024-12-12T16:27:49,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:49,926 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:49,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:49,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:49,966 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:49,971 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212953f773adf364f268d5cc614d1024131_044396069fa7748e35e43f97f084a6ce to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212953f773adf364f268d5cc614d1024131_044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:49,972 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/2864e63fee3f4f9c83fe0deea0bd3a91, store: [table=TestAcidGuarantees family=A region=044396069fa7748e35e43f97f084a6ce] 2024-12-12T16:27:49,973 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/2864e63fee3f4f9c83fe0deea0bd3a91 is 175, key is test_row_0/A:col10/1734020868925/Put/seqid=0 2024-12-12T16:27:49,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742058_1234 (size=39949) 2024-12-12T16:27:50,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-12T16:27:50,079 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:50,079 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-12T16:27:50,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:50,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 
as already flushing 2024-12-12T16:27:50,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:50,080 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:50,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:50,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:50,155 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:50,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020930154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:50,156 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:50,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020930155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:50,160 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:50,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020930158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:50,164 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:50,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020930160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:50,232 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:50,232 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-12T16:27:50,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:50,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. as already flushing 2024-12-12T16:27:50,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:50,233 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:27:50,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:50,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:50,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-12T16:27:50,385 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:50,385 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-12T16:27:50,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:50,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. as already flushing 2024-12-12T16:27:50,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:50,386 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:50,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:50,387 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=355, memsize=55.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/2864e63fee3f4f9c83fe0deea0bd3a91 2024-12-12T16:27:50,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:50,394 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/48049af4285545c7b5c9a87f14e5904a is 50, key is test_row_0/B:col10/1734020868925/Put/seqid=0 2024-12-12T16:27:50,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742059_1235 (size=12301) 2024-12-12T16:27:50,538 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:50,539 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-12T16:27:50,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:50,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. as already flushing 2024-12-12T16:27:50,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:50,539 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:50,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:50,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:50,659 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:50,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020930659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:50,663 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:50,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020930661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:50,666 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:50,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020930664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:50,671 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:50,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020930668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:50,691 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:50,693 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-12T16:27:50,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:50,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. as already flushing 2024-12-12T16:27:50,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:50,693 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:50,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:50,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:50,799 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=355 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/48049af4285545c7b5c9a87f14e5904a 2024-12-12T16:27:50,808 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/404965690c6348a0a60da17e48707a8d is 50, key is test_row_0/C:col10/1734020868925/Put/seqid=0 2024-12-12T16:27:50,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742060_1236 (size=12301) 2024-12-12T16:27:50,845 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:50,846 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-12T16:27:50,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:50,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. as already flushing 2024-12-12T16:27:50,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:50,847 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:50,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:50,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:50,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-12T16:27:50,999 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:51,000 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-12T16:27:51,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:51,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. as already flushing 2024-12-12T16:27:51,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:51,000 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
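Annotation: the "Over memstore limit=512.0 K" warnings earlier in this stretch show RegionTooBusyException being returned to client Mutate calls while the region's memstore is above its blocking limit. The stock HBase client normally retries this exception on its own; the following is only a minimal, hypothetical sketch that makes such a retry loop explicit, using the table, row and family names seen in the log. The retry count and backoff values are assumptions, not anything taken from this test.

// Hedged sketch: explicit retry around a put that may hit RegionTooBusyException
// ("Over memstore limit") while the region is busy flushing, as logged above.
// Retry/backoff parameters are illustrative assumptions only.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"))
                    .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;                 // assumed starting backoff
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);               // may throw RegionTooBusyException
                    return;                       // success
                } catch (RegionTooBusyException busy) {
                    // Memstore is over its blocking limit; give the flush time to
                    // drain it, then try again with exponential backoff.
                    Thread.sleep(backoffMs);
                    backoffMs = Math.min(backoffMs * 2, 5_000);
                }
            }
            throw new RuntimeException("put still rejected after retries");
        }
    }
}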
2024-12-12T16:27:51,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:51,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:51,153 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:51,153 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-12T16:27:51,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:51,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. as already flushing 2024-12-12T16:27:51,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:51,153 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:51,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:27:51,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
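Annotation: the repeated pid=62 records above are the master re-dispatching the same FlushRegionCallable while the regionserver reports "NOT flushing ... as already flushing" and fails the callable with an IOException; the master simply keeps retrying until the in-progress flush completes. A caller would trigger this kind of flush through the Admin API, roughly as in the hedged sketch below (only the connection boilerplate and the table name from the log; nothing here is HBase-internal code).

// Minimal sketch of requesting a table flush via the public Admin API.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // Asks the master to run a flush procedure against every region of the table;
            // if a region is already flushing, the server-side callable fails and the
            // master retries, which is exactly the pattern visible in the log.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}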
2024-12-12T16:27:51,216 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=355 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/404965690c6348a0a60da17e48707a8d 2024-12-12T16:27:51,221 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/2864e63fee3f4f9c83fe0deea0bd3a91 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/2864e63fee3f4f9c83fe0deea0bd3a91 2024-12-12T16:27:51,226 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/2864e63fee3f4f9c83fe0deea0bd3a91, entries=200, sequenceid=355, filesize=39.0 K 2024-12-12T16:27:51,228 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/48049af4285545c7b5c9a87f14e5904a as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/48049af4285545c7b5c9a87f14e5904a 2024-12-12T16:27:51,232 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/48049af4285545c7b5c9a87f14e5904a, entries=150, sequenceid=355, filesize=12.0 K 2024-12-12T16:27:51,233 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/404965690c6348a0a60da17e48707a8d as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/404965690c6348a0a60da17e48707a8d 2024-12-12T16:27:51,237 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/404965690c6348a0a60da17e48707a8d, entries=150, sequenceid=355, filesize=12.0 K 2024-12-12T16:27:51,238 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=40.25 KB/41220 for 044396069fa7748e35e43f97f084a6ce in 1700ms, sequenceid=355, compaction requested=true 2024-12-12T16:27:51,238 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 044396069fa7748e35e43f97f084a6ce: 2024-12-12T16:27:51,238 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-12-12T16:27:51,238 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 044396069fa7748e35e43f97f084a6ce:A, priority=-2147483648, 
current under compaction store size is 1 2024-12-12T16:27:51,238 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:51,238 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 044396069fa7748e35e43f97f084a6ce:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T16:27:51,238 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:51,238 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 044396069fa7748e35e43f97f084a6ce:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T16:27:51,238 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:27:51,238 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-12-12T16:27:51,240 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 165617 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-12-12T16:27:51,240 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): 044396069fa7748e35e43f97f084a6ce/A is initiating minor compaction (all files) 2024-12-12T16:27:51,240 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 044396069fa7748e35e43f97f084a6ce/A in TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 
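Annotation: the "Committing .../.tmp/A/2864e63fee3f4f9c83fe0deea0bd3a91 as .../A/2864e63fee3f4f9c83fe0deea0bd3a91" entries above show the flush commit step: the new HFile is written under the region's .tmp directory and then moved into the store directory, so readers never observe a partially written file. The following is a simplified illustration of that tmp-then-rename pattern using the plain Hadoop FileSystem API, with paths copied from the log; it is not HBase's HRegionFileSystem implementation.

// Simplified tmp-then-rename commit sketch (assumption: same filesystem, bare error check).
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CommitFlushedFileSketch {
    static void commit(FileSystem fs, Path tmpFile, Path storeDir) throws IOException {
        Path dest = new Path(storeDir, tmpFile.getName());
        // A rename within the same filesystem publishes the file in one step on HDFS.
        if (!fs.rename(tmpFile, dest)) {
            throw new IOException("Failed to commit " + tmpFile + " as " + dest);
        }
    }

    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        Path region = new Path("hdfs://localhost:45065/user/jenkins/test-data/"
                + "033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/"
                + "044396069fa7748e35e43f97f084a6ce");
        FileSystem fs = region.getFileSystem(conf);
        commit(fs, new Path(region, ".tmp/A/2864e63fee3f4f9c83fe0deea0bd3a91"),
               new Path(region, "A"));
    }
}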
2024-12-12T16:27:51,241 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/ccdfd3ac9eec44e0a8a8500a51c92dc8, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/22666d0a8e594f68850e9ca752ff9edb, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/abe581f24653471282017e024b4ae7bc, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/f05fc865c001485fb11cf3312e6df469, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/2864e63fee3f4f9c83fe0deea0bd3a91] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp, totalSize=161.7 K 2024-12-12T16:27:51,241 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=11 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:51,241 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. files: [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/ccdfd3ac9eec44e0a8a8500a51c92dc8, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/22666d0a8e594f68850e9ca752ff9edb, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/abe581f24653471282017e024b4ae7bc, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/f05fc865c001485fb11cf3312e6df469, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/2864e63fee3f4f9c83fe0deea0bd3a91] 2024-12-12T16:27:51,241 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 62153 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-12-12T16:27:51,241 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): 044396069fa7748e35e43f97f084a6ce/B is initiating minor compaction (all files) 2024-12-12T16:27:51,241 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 044396069fa7748e35e43f97f084a6ce/B in TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 
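Annotation: the "Exploring compaction algorithm has selected 5 files ... after considering 6 permutations with 6 in ratio" entries above refer to the ratio test applied to each candidate window of store files. A toy version of just that test is sketched below (assumptions: default-style ratio check only, no min/max file-count or size bounds): a window qualifies when no single file is larger than `ratio` times the combined size of the other files in the window. File sizes are rough values for the B-family files listed in the log; the 1.2 ratio is the usual default for hbase.hstore.compaction.ratio.

// Toy ratio check behind exploring-style compaction selection (not HBase's code).
import java.util.List;

public class RatioSelectionSketch {
    static boolean inRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            if (size > ratio * (total - size)) {
                return false;  // one file dominates the window; this permutation is skipped
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Approximate sizes (bytes) of the five B-family files being compacted above.
        List<Long> window = List.of(12_900L, 12_300L, 12_300L, 12_300L, 12_301L);
        System.out.println("selected=" + inRatio(window, 1.2));
    }
}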
2024-12-12T16:27:51,241 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting ccdfd3ac9eec44e0a8a8500a51c92dc8, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1734020864701 2024-12-12T16:27:51,241 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/ddc546d25dfe4f59b5125548cbcbc5cd, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/6867052bcc07466082e09bb6be969275, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/c7319437e81a48c29fb409a23f93ee44, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/29a7f1464e014f4783d2464929e638ae, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/48049af4285545c7b5c9a87f14e5904a] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp, totalSize=60.7 K 2024-12-12T16:27:51,242 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting ddc546d25dfe4f59b5125548cbcbc5cd, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1734020864701 2024-12-12T16:27:51,242 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 22666d0a8e594f68850e9ca752ff9edb, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1734020865320 2024-12-12T16:27:51,243 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting abe581f24653471282017e024b4ae7bc, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1734020865648 2024-12-12T16:27:51,243 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 6867052bcc07466082e09bb6be969275, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1734020865320 2024-12-12T16:27:51,243 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting c7319437e81a48c29fb409a23f93ee44, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1734020865648 2024-12-12T16:27:51,243 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting f05fc865c001485fb11cf3312e6df469, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=327, earliestPutTs=1734020866775 2024-12-12T16:27:51,244 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 29a7f1464e014f4783d2464929e638ae, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=327, earliestPutTs=1734020866775 2024-12-12T16:27:51,244 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2864e63fee3f4f9c83fe0deea0bd3a91, keycount=200, bloomtype=ROW, size=39.0 K, 
encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1734020868916 2024-12-12T16:27:51,244 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 48049af4285545c7b5c9a87f14e5904a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1734020868925 2024-12-12T16:27:51,256 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=044396069fa7748e35e43f97f084a6ce] 2024-12-12T16:27:51,259 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241212b5309010472148a4a353ea616bc17efa_044396069fa7748e35e43f97f084a6ce store=[table=TestAcidGuarantees family=A region=044396069fa7748e35e43f97f084a6ce] 2024-12-12T16:27:51,260 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 044396069fa7748e35e43f97f084a6ce#B#compaction#196 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:27:51,261 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241212b5309010472148a4a353ea616bc17efa_044396069fa7748e35e43f97f084a6ce, store=[table=TestAcidGuarantees family=A region=044396069fa7748e35e43f97f084a6ce] 2024-12-12T16:27:51,261 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/6635ba27d5494235b33d8759fa35f470 is 50, key is test_row_0/B:col10/1734020868925/Put/seqid=0 2024-12-12T16:27:51,261 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212b5309010472148a4a353ea616bc17efa_044396069fa7748e35e43f97f084a6ce because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=044396069fa7748e35e43f97f084a6ce] 2024-12-12T16:27:51,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742061_1237 (size=13119) 2024-12-12T16:27:51,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742062_1238 (size=4469) 2024-12-12T16:27:51,283 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 044396069fa7748e35e43f97f084a6ce#A#compaction#195 average throughput is 0.90 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:27:51,284 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/5d52f3323e4b442690c2895a7cbc3c8d is 175, key is test_row_0/A:col10/1734020868925/Put/seqid=0 2024-12-12T16:27:51,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742063_1239 (size=32073) 2024-12-12T16:27:51,295 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/5d52f3323e4b442690c2895a7cbc3c8d as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/5d52f3323e4b442690c2895a7cbc3c8d 2024-12-12T16:27:51,300 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 044396069fa7748e35e43f97f084a6ce/A of 044396069fa7748e35e43f97f084a6ce into 5d52f3323e4b442690c2895a7cbc3c8d(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:27:51,300 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 044396069fa7748e35e43f97f084a6ce: 2024-12-12T16:27:51,300 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce., storeName=044396069fa7748e35e43f97f084a6ce/A, priority=11, startTime=1734020871238; duration=0sec 2024-12-12T16:27:51,300 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:27:51,300 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 044396069fa7748e35e43f97f084a6ce:A 2024-12-12T16:27:51,300 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-12-12T16:27:51,303 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 62153 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-12-12T16:27:51,304 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): 044396069fa7748e35e43f97f084a6ce/C is initiating minor compaction (all files) 2024-12-12T16:27:51,304 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 044396069fa7748e35e43f97f084a6ce/C in TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 
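Annotation: the PressureAwareThroughputController entries above ("average throughput is ... total limit is 50.00 MB/second") report that compaction writes are metered against a bytes-per-second budget and the writer sleeps once it gets ahead of it. The sketch below is not the HBase controller itself, just a minimal rate limiter showing the same idea; only the 50 MB/s figure comes from the log, everything else is assumed.

// Minimal write-throughput limiter sketch: sleep when ahead of the byte budget.
public class SimpleThroughputLimiter {
    private final double bytesPerSecond;
    private final long startNanos = System.nanoTime();
    private long bytesWritten;

    public SimpleThroughputLimiter(double bytesPerSecond) {
        this.bytesPerSecond = bytesPerSecond;
    }

    /** Call after writing `bytes`; sleeps if the writer is ahead of the allowed rate. */
    public void control(long bytes) throws InterruptedException {
        bytesWritten += bytes;
        double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
        double earliestAllowedSec = bytesWritten / bytesPerSecond;
        if (earliestAllowedSec > elapsedSec) {
            Thread.sleep((long) ((earliestAllowedSec - elapsedSec) * 1000));
        }
    }

    public static void main(String[] args) throws InterruptedException {
        SimpleThroughputLimiter limiter = new SimpleThroughputLimiter(50 * 1024 * 1024); // 50 MB/s
        for (int i = 0; i < 10; i++) {
            limiter.control(8 * 1024 * 1024);  // pretend an 8 MB block was just written
        }
    }
}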
2024-12-12T16:27:51,304 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/a42a67ba13f1484480bab737ef1e1f7d, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/5b444dbf4a624635b871cdace4993440, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/2f44a877c8e44fd3a5e2d662eb8c145b, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/ba425df7131443509c877164f8d40ae6, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/404965690c6348a0a60da17e48707a8d] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp, totalSize=60.7 K 2024-12-12T16:27:51,304 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting a42a67ba13f1484480bab737ef1e1f7d, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1734020864701 2024-12-12T16:27:51,305 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5b444dbf4a624635b871cdace4993440, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1734020865320 2024-12-12T16:27:51,305 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2f44a877c8e44fd3a5e2d662eb8c145b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1734020865648 2024-12-12T16:27:51,306 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:51,306 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting ba425df7131443509c877164f8d40ae6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=327, earliestPutTs=1734020866775 2024-12-12T16:27:51,306 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-12T16:27:51,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 
2024-12-12T16:27:51,306 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2837): Flushing 044396069fa7748e35e43f97f084a6ce 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-12-12T16:27:51,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=A 2024-12-12T16:27:51,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:51,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=B 2024-12-12T16:27:51,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:51,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=C 2024-12-12T16:27:51,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:51,309 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 404965690c6348a0a60da17e48707a8d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1734020868925 2024-12-12T16:27:51,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412129adeebe9786a438d94e1014a0d773aa6_044396069fa7748e35e43f97f084a6ce is 50, key is test_row_0/A:col10/1734020869547/Put/seqid=0 2024-12-12T16:27:51,324 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 044396069fa7748e35e43f97f084a6ce#C#compaction#198 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:27:51,325 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/65b10e07a1a447cfa232ed4ede1e7a9d is 50, key is test_row_0/C:col10/1734020868925/Put/seqid=0 2024-12-12T16:27:51,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742064_1240 (size=12454) 2024-12-12T16:27:51,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:51,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742065_1241 (size=13119) 2024-12-12T16:27:51,342 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412129adeebe9786a438d94e1014a0d773aa6_044396069fa7748e35e43f97f084a6ce to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412129adeebe9786a438d94e1014a0d773aa6_044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:51,343 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/65b10e07a1a447cfa232ed4ede1e7a9d as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/65b10e07a1a447cfa232ed4ede1e7a9d 2024-12-12T16:27:51,349 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 044396069fa7748e35e43f97f084a6ce/C of 044396069fa7748e35e43f97f084a6ce into 65b10e07a1a447cfa232ed4ede1e7a9d(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T16:27:51,349 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 044396069fa7748e35e43f97f084a6ce: 2024-12-12T16:27:51,349 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce., storeName=044396069fa7748e35e43f97f084a6ce/C, priority=11, startTime=1734020871238; duration=0sec 2024-12-12T16:27:51,349 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:51,349 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 044396069fa7748e35e43f97f084a6ce:C 2024-12-12T16:27:51,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/575d3825cd984ea3860b1170c0d54333, store: [table=TestAcidGuarantees family=A region=044396069fa7748e35e43f97f084a6ce] 2024-12-12T16:27:51,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/575d3825cd984ea3860b1170c0d54333 is 175, key is test_row_0/A:col10/1734020869547/Put/seqid=0 2024-12-12T16:27:51,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742066_1242 (size=31255) 2024-12-12T16:27:51,666 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. as already flushing 2024-12-12T16:27:51,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:51,679 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/6635ba27d5494235b33d8759fa35f470 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/6635ba27d5494235b33d8759fa35f470 2024-12-12T16:27:51,685 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 044396069fa7748e35e43f97f084a6ce/B of 044396069fa7748e35e43f97f084a6ce into 6635ba27d5494235b33d8759fa35f470(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
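In the entries that follow, the RPC handlers start rejecting Mutate calls with RegionTooBusyException because the region's memstore has reached its blocking limit (512.0 K here) while the flush is still in flight. The stock HBase client retries these rejections on its own, which is why the test's writers still complete; the sketch below only illustrates, under assumed retry settings, how an application-level write loop might back off on the same condition. The row, family, and qualifier reuse names from the log; everything else (class name, retry counts, sleep times) is hypothetical.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriteExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // The stock client already retries RegionTooBusyException internally;
    // these settings only control how patient it is (illustrative values).
    conf.setInt("hbase.client.retries.number", 10);
    conf.setLong("hbase.client.pause", 200);

    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);
          break; // write accepted
        } catch (IOException e) {
          // Once the client's own retries are exhausted, the rejection
          // surfaces here (possibly wrapped in a RetriesExhaustedException);
          // back off and let the in-flight flush drain the memstore.
          Thread.sleep(200L * attempt);
        }
      }
    }
  }
}

The 512.0 K limit in these warnings is the region's blocking memstore size (the configured memstore flush size multiplied by hbase.hregion.memstore.block.multiplier), so a flush size far below the production default is evidently in effect for this test.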
2024-12-12T16:27:51,685 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 044396069fa7748e35e43f97f084a6ce: 2024-12-12T16:27:51,685 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce., storeName=044396069fa7748e35e43f97f084a6ce/B, priority=11, startTime=1734020871238; duration=0sec 2024-12-12T16:27:51,685 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:51,685 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 044396069fa7748e35e43f97f084a6ce:B 2024-12-12T16:27:51,696 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:51,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020931693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:51,697 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:51,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020931694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:51,700 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:51,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020931700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:51,708 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:51,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020931701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:51,762 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=365, memsize=13.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/575d3825cd984ea3860b1170c0d54333 2024-12-12T16:27:51,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/9c128a1396f04b0ca9fe013ac5a27f27 is 50, key is test_row_0/B:col10/1734020869547/Put/seqid=0 2024-12-12T16:27:51,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742067_1243 (size=12301) 2024-12-12T16:27:51,801 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:51,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020931801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:51,801 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:51,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020931801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:51,802 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:51,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020931801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:51,810 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:51,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020931809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:51,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-12T16:27:51,901 DEBUG [Thread-695 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3448d233 to 127.0.0.1:52684 2024-12-12T16:27:51,902 DEBUG [Thread-697 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7a11164b to 127.0.0.1:52684 2024-12-12T16:27:51,902 DEBUG [Thread-695 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:27:51,902 DEBUG [Thread-697 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:27:51,905 DEBUG [Thread-699 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x08a7e1dd to 127.0.0.1:52684 2024-12-12T16:27:51,905 DEBUG [Thread-699 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:27:51,907 DEBUG [Thread-701 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x31a027db to 127.0.0.1:52684 2024-12-12T16:27:51,907 DEBUG [Thread-701 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:27:52,003 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:52,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020932002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:52,003 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:52,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020932003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:52,004 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:52,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020932003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:52,012 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:52,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020932012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:52,196 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=365 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/9c128a1396f04b0ca9fe013ac5a27f27 2024-12-12T16:27:52,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/154d86bc5c9e4f0ba49f6c7d8c9d7964 is 50, key is test_row_0/C:col10/1734020869547/Put/seqid=0 2024-12-12T16:27:52,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742068_1244 (size=12301) 2024-12-12T16:27:52,304 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:52,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55226 deadline: 1734020932304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:52,305 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:52,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55180 deadline: 1734020932305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:52,306 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:52,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55190 deadline: 1734020932306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:52,313 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:27:52,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55194 deadline: 1734020932313, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:52,609 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=365 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/154d86bc5c9e4f0ba49f6c7d8c9d7964 2024-12-12T16:27:52,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/575d3825cd984ea3860b1170c0d54333 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/575d3825cd984ea3860b1170c0d54333 2024-12-12T16:27:52,618 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/575d3825cd984ea3860b1170c0d54333, entries=150, sequenceid=365, filesize=30.5 K 2024-12-12T16:27:52,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/9c128a1396f04b0ca9fe013ac5a27f27 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/9c128a1396f04b0ca9fe013ac5a27f27 2024-12-12T16:27:52,623 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/9c128a1396f04b0ca9fe013ac5a27f27, entries=150, sequenceid=365, filesize=12.0 K 2024-12-12T16:27:52,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/154d86bc5c9e4f0ba49f6c7d8c9d7964 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/154d86bc5c9e4f0ba49f6c7d8c9d7964 2024-12-12T16:27:52,627 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/154d86bc5c9e4f0ba49f6c7d8c9d7964, entries=150, sequenceid=365, filesize=12.0 K 2024-12-12T16:27:52,628 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=167.72 KB/171750 for 044396069fa7748e35e43f97f084a6ce in 1322ms, sequenceid=365, compaction requested=false 2024-12-12T16:27:52,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2538): Flush status journal for 044396069fa7748e35e43f97f084a6ce: 2024-12-12T16:27:52,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:52,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=62 2024-12-12T16:27:52,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4106): Remote procedure done, pid=62 2024-12-12T16:27:52,630 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=62, resume processing ppid=61 2024-12-12T16:27:52,630 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, ppid=61, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.8550 sec 2024-12-12T16:27:52,632 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees in 2.8600 sec 2024-12-12T16:27:52,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:52,808 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 044396069fa7748e35e43f97f084a6ce 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-12-12T16:27:52,808 DEBUG [Thread-684 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x491ea2ee to 127.0.0.1:52684 2024-12-12T16:27:52,808 DEBUG [Thread-684 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:27:52,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=A 2024-12-12T16:27:52,809 DEBUG [Thread-688 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x46114993 to 127.0.0.1:52684 2024-12-12T16:27:52,809 DEBUG [Thread-688 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:27:52,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline 
suffix; before=1, new segment=null 2024-12-12T16:27:52,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=B 2024-12-12T16:27:52,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:52,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=C 2024-12-12T16:27:52,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:52,812 DEBUG [Thread-686 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0b44b1e5 to 127.0.0.1:52684 2024-12-12T16:27:52,812 DEBUG [Thread-686 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:27:52,815 DEBUG [Thread-690 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2885d2d9 to 127.0.0.1:52684 2024-12-12T16:27:52,815 DEBUG [Thread-690 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:27:52,817 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212360080bb268b442194a40819fe0423a9_044396069fa7748e35e43f97f084a6ce is 50, key is test_row_0/A:col10/1734020871689/Put/seqid=0 2024-12-12T16:27:52,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742069_1245 (size=12454) 2024-12-12T16:27:53,225 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:53,230 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212360080bb268b442194a40819fe0423a9_044396069fa7748e35e43f97f084a6ce to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212360080bb268b442194a40819fe0423a9_044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:53,230 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/a111e914b1f84ae787a8a63675d428a3, store: [table=TestAcidGuarantees family=A region=044396069fa7748e35e43f97f084a6ce] 2024-12-12T16:27:53,231 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/a111e914b1f84ae787a8a63675d428a3 is 175, key is test_row_0/A:col10/1734020871689/Put/seqid=0 2024-12-12T16:27:53,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742070_1246 (size=31255) 2024-12-12T16:27:53,457 DEBUG [master/4f6a4780a2f6:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region f489058ee189c52324fddaaf21558958 changed from -1.0 to 0.0, refreshing cache 2024-12-12T16:27:53,635 
INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=397, memsize=60.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/a111e914b1f84ae787a8a63675d428a3 2024-12-12T16:27:53,642 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/3b381b119e4042fca641c26af9a9b038 is 50, key is test_row_0/B:col10/1734020871689/Put/seqid=0 2024-12-12T16:27:53,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742071_1247 (size=12301) 2024-12-12T16:27:53,807 DEBUG [Thread-692 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x152377d4 to 127.0.0.1:52684 2024-12-12T16:27:53,807 DEBUG [Thread-692 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:27:53,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-12T16:27:53,877 INFO [Thread-694 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 61 completed 2024-12-12T16:27:53,878 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-12-12T16:27:53,878 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 70 2024-12-12T16:27:53,878 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 68 2024-12-12T16:27:53,878 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 64 2024-12-12T16:27:53,878 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 65 2024-12-12T16:27:53,878 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 48 2024-12-12T16:27:53,878 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-12T16:27:53,878 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5955 2024-12-12T16:27:53,878 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6039 2024-12-12T16:27:53,878 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-12T16:27:53,878 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2621 2024-12-12T16:27:53,878 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7863 rows 2024-12-12T16:27:53,878 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2586 2024-12-12T16:27:53,878 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7758 rows 2024-12-12T16:27:53,878 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-12T16:27:53,878 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6eb305fc to 127.0.0.1:52684 2024-12-12T16:27:53,878 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:27:53,880 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-12T16:27:53,880 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-12T16:27:53,881 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=63, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-12T16:27:53,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-12T16:27:53,883 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734020873883"}]},"ts":"1734020873883"} 2024-12-12T16:27:53,885 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-12T16:27:53,888 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-12T16:27:53,889 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-12T16:27:53,890 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=65, ppid=64, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=044396069fa7748e35e43f97f084a6ce, UNASSIGN}] 2024-12-12T16:27:53,891 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=65, ppid=64, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=044396069fa7748e35e43f97f084a6ce, UNASSIGN 2024-12-12T16:27:53,891 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=044396069fa7748e35e43f97f084a6ce, regionState=CLOSING, regionLocation=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:53,892 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T16:27:53,892 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=66, ppid=65, state=RUNNABLE; CloseRegionProcedure 044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476}] 2024-12-12T16:27:53,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-12T16:27:54,043 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:54,044 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] handler.UnassignRegionHandler(124): Close 044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:54,044 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T16:27:54,044 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1681): Closing 044396069fa7748e35e43f97f084a6ce, disabling compactions & flushes 2024-12-12T16:27:54,044 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1942): waiting for 0 compactions & cache flush to complete for region TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 
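Both the flush that just completed (procId 61 with region subprocedure pid=62) and the disable sequence recorded here (pids 63-66: DisableTableProcedure, CloseTableRegionsProcedure, TransitRegionStateProcedure UNASSIGN, CloseRegionProcedure) are driven through the ordinary Admin API. A minimal sketch, assuming a reachable cluster and the same table name, of issuing the equivalent calls from a client; this is not the AcidGuaranteesTestTool's own code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushThenDisableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      // The master runs a FlushTableProcedure with one FlushRegionProcedure
      // per region (procId 61 / pid 62 above); the client polls until the
      // procedure reports done, as the MasterRpcServices entries show.
      admin.flush(table);
      // Disabling schedules DisableTableProcedure -> CloseTableRegionsProcedure
      // -> per-region unassign and CloseRegionProcedure, matching pids 63-66.
      admin.disableTable(table);
      System.out.println("disabled=" + admin.isTableDisabled(table));
    }
  }
}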
2024-12-12T16:27:54,048 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=397 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/3b381b119e4042fca641c26af9a9b038 2024-12-12T16:27:54,055 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/b30b1784b5e24d978baa596b493c2c0e is 50, key is test_row_0/C:col10/1734020871689/Put/seqid=0 2024-12-12T16:27:54,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742072_1248 (size=12301) 2024-12-12T16:27:54,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-12T16:27:54,459 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=397 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/b30b1784b5e24d978baa596b493c2c0e 2024-12-12T16:27:54,463 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/a111e914b1f84ae787a8a63675d428a3 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/a111e914b1f84ae787a8a63675d428a3 2024-12-12T16:27:54,467 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/a111e914b1f84ae787a8a63675d428a3, entries=150, sequenceid=397, filesize=30.5 K 2024-12-12T16:27:54,468 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/3b381b119e4042fca641c26af9a9b038 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/3b381b119e4042fca641c26af9a9b038 2024-12-12T16:27:54,472 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/3b381b119e4042fca641c26af9a9b038, entries=150, sequenceid=397, filesize=12.0 K 2024-12-12T16:27:54,473 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/b30b1784b5e24d978baa596b493c2c0e as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/b30b1784b5e24d978baa596b493c2c0e 2024-12-12T16:27:54,476 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/b30b1784b5e24d978baa596b493c2c0e, entries=150, sequenceid=397, filesize=12.0 K 2024-12-12T16:27:54,477 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~181.14 KB/185490, heapSize ~475.31 KB/486720, currentSize=20.13 KB/20610 for 044396069fa7748e35e43f97f084a6ce in 1669ms, sequenceid=397, compaction requested=true 2024-12-12T16:27:54,477 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 044396069fa7748e35e43f97f084a6ce: 2024-12-12T16:27:54,477 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:54,477 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:54,477 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 044396069fa7748e35e43f97f084a6ce:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T16:27:54,477 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. after waiting 0 ms 2024-12-12T16:27:54,477 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 2024-12-12T16:27:54,477 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:54,477 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. because compaction request was cancelled 2024-12-12T16:27:54,477 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 044396069fa7748e35e43f97f084a6ce:A 2024-12-12T16:27:54,477 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 044396069fa7748e35e43f97f084a6ce:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T16:27:54,478 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 
because compaction request was cancelled 2024-12-12T16:27:54,478 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(2837): Flushing 044396069fa7748e35e43f97f084a6ce 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-12-12T16:27:54,478 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:54,478 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 044396069fa7748e35e43f97f084a6ce:B 2024-12-12T16:27:54,478 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. because compaction request was cancelled 2024-12-12T16:27:54,478 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 044396069fa7748e35e43f97f084a6ce:C, priority=-2147483648, current under compaction store size is 1 2024-12-12T16:27:54,478 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 044396069fa7748e35e43f97f084a6ce:C 2024-12-12T16:27:54,478 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:27:54,478 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=A 2024-12-12T16:27:54,478 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:54,478 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=B 2024-12-12T16:27:54,478 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:54,478 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 044396069fa7748e35e43f97f084a6ce, store=C 2024-12-12T16:27:54,478 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:27:54,484 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212405b69ecf65842cb8bf19fc113e8049f_044396069fa7748e35e43f97f084a6ce is 50, key is test_row_1/A:col10/1734020872814/Put/seqid=0 2024-12-12T16:27:54,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-12T16:27:54,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:43013 is added to blk_1073742073_1249 (size=9914) 2024-12-12T16:27:54,888 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:54,892 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212405b69ecf65842cb8bf19fc113e8049f_044396069fa7748e35e43f97f084a6ce to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212405b69ecf65842cb8bf19fc113e8049f_044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:54,893 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/f0a39d59998242fb8f8567bfbf19177d, store: [table=TestAcidGuarantees family=A region=044396069fa7748e35e43f97f084a6ce] 2024-12-12T16:27:54,894 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/f0a39d59998242fb8f8567bfbf19177d is 175, key is test_row_1/A:col10/1734020872814/Put/seqid=0 2024-12-12T16:27:54,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742074_1250 (size=22561) 2024-12-12T16:27:54,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-12T16:27:55,298 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=403, memsize=6.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/f0a39d59998242fb8f8567bfbf19177d 2024-12-12T16:27:55,305 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/4876063cd093497aa0ac8df04a334d82 is 50, key is test_row_1/B:col10/1734020872814/Put/seqid=0 2024-12-12T16:27:55,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742075_1251 (size=9857) 2024-12-12T16:27:55,709 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=403 (bloomFilter=true), 
to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/4876063cd093497aa0ac8df04a334d82 2024-12-12T16:27:55,716 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/db9f1eb5a0d04c77a37f6793a2d90217 is 50, key is test_row_1/C:col10/1734020872814/Put/seqid=0 2024-12-12T16:27:55,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742076_1252 (size=9857) 2024-12-12T16:27:55,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-12T16:27:56,121 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=403 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/db9f1eb5a0d04c77a37f6793a2d90217 2024-12-12T16:27:56,126 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/A/f0a39d59998242fb8f8567bfbf19177d as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/f0a39d59998242fb8f8567bfbf19177d 2024-12-12T16:27:56,129 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/f0a39d59998242fb8f8567bfbf19177d, entries=100, sequenceid=403, filesize=22.0 K 2024-12-12T16:27:56,130 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/B/4876063cd093497aa0ac8df04a334d82 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/4876063cd093497aa0ac8df04a334d82 2024-12-12T16:27:56,133 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/4876063cd093497aa0ac8df04a334d82, entries=100, sequenceid=403, filesize=9.6 K 2024-12-12T16:27:56,134 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/.tmp/C/db9f1eb5a0d04c77a37f6793a2d90217 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/db9f1eb5a0d04c77a37f6793a2d90217 2024-12-12T16:27:56,137 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/db9f1eb5a0d04c77a37f6793a2d90217, entries=100, sequenceid=403, filesize=9.6 K 2024-12-12T16:27:56,138 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=0 B/0 for 044396069fa7748e35e43f97f084a6ce in 1661ms, sequenceid=403, compaction requested=true 2024-12-12T16:27:56,139 DEBUG [StoreCloser-TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/b1bb9880adbb4181aa9f2ee1f6dd4cfc, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/8ebe1def5afd40b3808246e3975e876b, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/2143f7b991ef43e881fd12333e89b911, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/71d955331f4d486d94e38b97d7d90ba5, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/9abc097ce61a4b30b6f4395421c672c8, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/5f9f67b6cfb34603947ac5d74fb69b19, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/9f88e2c0f8ca4ffc8083e6c999cf8e64, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/a650423420a04646af6a7b4ca0350170, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/57a3393156474797995e385c91c1d793, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/3d1edd717ed0456cacff15bf5ee9e645, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/e0b2c7ab390c4ed3afa867ac24bd07f2, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/2c5a1f887cf04e61aaa99bd06df3fe1b, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/70717b8ab4d147449c2aa55c309e48d3, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/864dd3d65a6d4bb1b45ab0981eac8a3b, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/b1db9123062b4e0c88754190035c7c0c, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/0bc11d64afbb49c290ad22a73c8519ef, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/7ed867db702e42c4be5d2a2911ec4c11, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/2ad5134368f1465380b45b11ef4baf95, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/f9a89ba336734d71ad997dab2ed83c34, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/ccdfd3ac9eec44e0a8a8500a51c92dc8, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/22666d0a8e594f68850e9ca752ff9edb, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/abe581f24653471282017e024b4ae7bc, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/f05fc865c001485fb11cf3312e6df469, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/2864e63fee3f4f9c83fe0deea0bd3a91] to archive 2024-12-12T16:27:56,139 DEBUG [StoreCloser-TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
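The mobdir flush earlier in this close (DefaultMobStoreFlusher writing d41d8cd98f00b204..._044396069fa7748e35e43f97f084a6ce under mobdir/.tmp and HMobStore renaming it into mobdir/data) happens because column family A is MOB-enabled. A hedged sketch of how such a family can be declared with the HBase 2.x descriptor builders; the threshold value is an assumption for illustration and is not taken from the test:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobFamilyExample {
      // Builds a table descriptor whose family "A" stores large cells as MOB files
      // under mobdir, which routes its flushes through DefaultMobStoreFlusher.
      static TableDescriptor mobTable(TableName name) {
        ColumnFamilyDescriptor a =
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                .setMobEnabled(true)
                .setMobThreshold(100L) // assumed threshold in bytes, illustration only
                .build();
        return TableDescriptorBuilder.newBuilder(name).setColumnFamily(a).build();
      }

      static void create(Admin admin, TableName name) throws java.io.IOException {
        admin.createTable(mobTable(name));
      }
    }
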
2024-12-12T16:27:56,143 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/9abc097ce61a4b30b6f4395421c672c8 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/9abc097ce61a4b30b6f4395421c672c8 2024-12-12T16:27:56,143 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/b1bb9880adbb4181aa9f2ee1f6dd4cfc to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/b1bb9880adbb4181aa9f2ee1f6dd4cfc 2024-12-12T16:27:56,143 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/8ebe1def5afd40b3808246e3975e876b to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/8ebe1def5afd40b3808246e3975e876b 2024-12-12T16:27:56,143 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/71d955331f4d486d94e38b97d7d90ba5 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/71d955331f4d486d94e38b97d7d90ba5 2024-12-12T16:27:56,143 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/9f88e2c0f8ca4ffc8083e6c999cf8e64 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/9f88e2c0f8ca4ffc8083e6c999cf8e64 2024-12-12T16:27:56,143 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/2143f7b991ef43e881fd12333e89b911 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/2143f7b991ef43e881fd12333e89b911 2024-12-12T16:27:56,144 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/5f9f67b6cfb34603947ac5d74fb69b19 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/5f9f67b6cfb34603947ac5d74fb69b19 2024-12-12T16:27:56,144 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/a650423420a04646af6a7b4ca0350170 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/a650423420a04646af6a7b4ca0350170 2024-12-12T16:27:56,145 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/e0b2c7ab390c4ed3afa867ac24bd07f2 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/e0b2c7ab390c4ed3afa867ac24bd07f2 2024-12-12T16:27:56,145 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/864dd3d65a6d4bb1b45ab0981eac8a3b to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/864dd3d65a6d4bb1b45ab0981eac8a3b 2024-12-12T16:27:56,145 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/b1db9123062b4e0c88754190035c7c0c to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/b1db9123062b4e0c88754190035c7c0c 2024-12-12T16:27:56,146 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/3d1edd717ed0456cacff15bf5ee9e645 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/3d1edd717ed0456cacff15bf5ee9e645 2024-12-12T16:27:56,146 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/0bc11d64afbb49c290ad22a73c8519ef to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/0bc11d64afbb49c290ad22a73c8519ef 2024-12-12T16:27:56,146 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/57a3393156474797995e385c91c1d793 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/57a3393156474797995e385c91c1d793 2024-12-12T16:27:56,147 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/2c5a1f887cf04e61aaa99bd06df3fe1b to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/2c5a1f887cf04e61aaa99bd06df3fe1b 2024-12-12T16:27:56,147 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/70717b8ab4d147449c2aa55c309e48d3 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/70717b8ab4d147449c2aa55c309e48d3 2024-12-12T16:27:56,148 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/2ad5134368f1465380b45b11ef4baf95 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/2ad5134368f1465380b45b11ef4baf95 2024-12-12T16:27:56,149 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/7ed867db702e42c4be5d2a2911ec4c11 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/7ed867db702e42c4be5d2a2911ec4c11 2024-12-12T16:27:56,149 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/22666d0a8e594f68850e9ca752ff9edb to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/22666d0a8e594f68850e9ca752ff9edb 2024-12-12T16:27:56,149 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/f9a89ba336734d71ad997dab2ed83c34 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/f9a89ba336734d71ad997dab2ed83c34 2024-12-12T16:27:56,149 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/ccdfd3ac9eec44e0a8a8500a51c92dc8 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/ccdfd3ac9eec44e0a8a8500a51c92dc8 2024-12-12T16:27:56,149 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/abe581f24653471282017e024b4ae7bc to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/abe581f24653471282017e024b4ae7bc 2024-12-12T16:27:56,149 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/f05fc865c001485fb11cf3312e6df469 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/f05fc865c001485fb11cf3312e6df469 2024-12-12T16:27:56,149 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/2864e63fee3f4f9c83fe0deea0bd3a91 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/2864e63fee3f4f9c83fe0deea0bd3a91 2024-12-12T16:27:56,151 DEBUG [StoreCloser-TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/55e53d566c4d4334ab7e4bb03fc43a27, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/9dd0b13de7514e59932a6a6d5175a3d5, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/4a953b28d65b446680d7c72caed76ad0, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/202fd29bd37c4265808ce2baeb1d0b36, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/52b43a5df4e6426fa587b9503779a142, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/ab60897030ca4081a9b598ef46236369, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/e2916e19d81446d59716aa12d28e0905, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/ece10e06e8a14854b74ea859255b76d9, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/7fb684ebbc294f24a3f6c0da5b7ecc67, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/8087b022c8f34e01afb7b0d56a782732, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/f946372aae2b4a25bb2f5f94ddb3f9ee, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/8de13515a3024316b977b94c1db5951b, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/be7f766b92724b2e9a48ca8127885fd6, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/0fbd022afd984d07a33a88d83d92ac5c, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/5779e1f2410c4df69730fc01f976b2a4, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/e99d405c0eb142bfa9ee5474ee4addee, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/3d766a3f93f348baa8108191186364a0, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/300e0f71b00b415d964ea95d449c3a8f, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/ddc546d25dfe4f59b5125548cbcbc5cd, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/427ff99de3004b11bd59cdaeed911606, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/6867052bcc07466082e09bb6be969275, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/c7319437e81a48c29fb409a23f93ee44, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/29a7f1464e014f4783d2464929e638ae, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/48049af4285545c7b5c9a87f14e5904a] to archive 2024-12-12T16:27:56,151 DEBUG [StoreCloser-TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
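The HFileArchiver records around this point show the StoreCloser moving every compacted store file for families A and B (and, below, C) out of the region's data directory into the matching path under archive/. One way to inspect that archive location afterwards is the ordinary Hadoop FileSystem API; the NameNode address and base directory below are copied from the log, while the code itself is an illustrative sketch rather than part of the test:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListArchivedStoreFiles {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // NameNode address and test base directory as they appear in the log.
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:45065"), conf);
        Path archiveDir = new Path(
            "/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/"
            + "archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A");
        // Lists the archived HFiles for family A; the same layout applies to B and C.
        for (FileStatus status : fs.listStatus(archiveDir)) {
          System.out.println(status.getPath().getName() + "  " + status.getLen() + " bytes");
        }
        fs.close();
      }
    }
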
2024-12-12T16:27:56,154 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/9dd0b13de7514e59932a6a6d5175a3d5 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/9dd0b13de7514e59932a6a6d5175a3d5 2024-12-12T16:27:56,154 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/55e53d566c4d4334ab7e4bb03fc43a27 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/55e53d566c4d4334ab7e4bb03fc43a27 2024-12-12T16:27:56,154 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/ece10e06e8a14854b74ea859255b76d9 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/ece10e06e8a14854b74ea859255b76d9 2024-12-12T16:27:56,154 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/52b43a5df4e6426fa587b9503779a142 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/52b43a5df4e6426fa587b9503779a142 2024-12-12T16:27:56,154 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/ab60897030ca4081a9b598ef46236369 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/ab60897030ca4081a9b598ef46236369 2024-12-12T16:27:56,155 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/e2916e19d81446d59716aa12d28e0905 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/e2916e19d81446d59716aa12d28e0905 2024-12-12T16:27:56,155 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/4a953b28d65b446680d7c72caed76ad0 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/4a953b28d65b446680d7c72caed76ad0 2024-12-12T16:27:56,156 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/202fd29bd37c4265808ce2baeb1d0b36 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/202fd29bd37c4265808ce2baeb1d0b36 2024-12-12T16:27:56,157 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/7fb684ebbc294f24a3f6c0da5b7ecc67 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/7fb684ebbc294f24a3f6c0da5b7ecc67 2024-12-12T16:27:56,157 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/8087b022c8f34e01afb7b0d56a782732 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/8087b022c8f34e01afb7b0d56a782732 2024-12-12T16:27:56,157 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/0fbd022afd984d07a33a88d83d92ac5c to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/0fbd022afd984d07a33a88d83d92ac5c 2024-12-12T16:27:56,157 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/8de13515a3024316b977b94c1db5951b to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/8de13515a3024316b977b94c1db5951b 2024-12-12T16:27:56,157 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/f946372aae2b4a25bb2f5f94ddb3f9ee to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/f946372aae2b4a25bb2f5f94ddb3f9ee 2024-12-12T16:27:56,158 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/5779e1f2410c4df69730fc01f976b2a4 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/5779e1f2410c4df69730fc01f976b2a4 2024-12-12T16:27:56,158 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/be7f766b92724b2e9a48ca8127885fd6 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/be7f766b92724b2e9a48ca8127885fd6 2024-12-12T16:27:56,158 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/e99d405c0eb142bfa9ee5474ee4addee to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/e99d405c0eb142bfa9ee5474ee4addee 2024-12-12T16:27:56,159 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/427ff99de3004b11bd59cdaeed911606 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/427ff99de3004b11bd59cdaeed911606 2024-12-12T16:27:56,160 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/300e0f71b00b415d964ea95d449c3a8f to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/300e0f71b00b415d964ea95d449c3a8f 2024-12-12T16:27:56,160 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/c7319437e81a48c29fb409a23f93ee44 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/c7319437e81a48c29fb409a23f93ee44 2024-12-12T16:27:56,161 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/6867052bcc07466082e09bb6be969275 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/6867052bcc07466082e09bb6be969275 2024-12-12T16:27:56,161 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/29a7f1464e014f4783d2464929e638ae to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/29a7f1464e014f4783d2464929e638ae 2024-12-12T16:27:56,161 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/48049af4285545c7b5c9a87f14e5904a to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/48049af4285545c7b5c9a87f14e5904a 2024-12-12T16:27:56,161 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/3d766a3f93f348baa8108191186364a0 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/3d766a3f93f348baa8108191186364a0 2024-12-12T16:27:56,161 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/ddc546d25dfe4f59b5125548cbcbc5cd to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/ddc546d25dfe4f59b5125548cbcbc5cd 2024-12-12T16:27:56,162 DEBUG [StoreCloser-TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/c170f1eb26df44c39eadc2404bcb7683, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/32f68485c48a45bda8b8786d4c1afd3f, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/641fcf0e34a34517a7aaefe8c6de162d, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/f6f4b04463304333a8e9d002828015fc, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/eb5c6d553303417c806332dd1586b747, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/378708e3b44b49a08bd8e5ff2c1f15e9, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/7a994c318ba64baea43a23c23f6f18bc, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/3aa264f9203b4731a2740e2776491c3d, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/ea46d63b05cd4850aacc321092f4481b, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/77951a13bd5f43be85c238002360a96d, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/8fadccfbe23643508fdd1c28fbb94bfe, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/009dbe2e3a75470c8f9753ae1f439152, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/48ebb81395004f22a81656c157e05bf8, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/daf53761b6d2405e921bdd58dd84f6fa, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/89cbc37040bb4656aef218a03c0c8258, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/dc3c23d96d5b44d996cc47964c80b79b, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/dee4597c3a7b4b55b3772a6f04cfc246, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/12b80e2ede894b6f80e6cd57845dff3b, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/a42a67ba13f1484480bab737ef1e1f7d, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/7ceb6436c37e4cf29133a012e28235b0, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/5b444dbf4a624635b871cdace4993440, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/2f44a877c8e44fd3a5e2d662eb8c145b, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/ba425df7131443509c877164f8d40ae6, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/404965690c6348a0a60da17e48707a8d] to archive 2024-12-12T16:27:56,163 DEBUG [StoreCloser-TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-12T16:27:56,166 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/c170f1eb26df44c39eadc2404bcb7683 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/c170f1eb26df44c39eadc2404bcb7683 2024-12-12T16:27:56,166 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/32f68485c48a45bda8b8786d4c1afd3f to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/32f68485c48a45bda8b8786d4c1afd3f 2024-12-12T16:27:56,166 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/f6f4b04463304333a8e9d002828015fc to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/f6f4b04463304333a8e9d002828015fc 2024-12-12T16:27:56,167 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/7a994c318ba64baea43a23c23f6f18bc to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/7a994c318ba64baea43a23c23f6f18bc 2024-12-12T16:27:56,167 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/eb5c6d553303417c806332dd1586b747 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/eb5c6d553303417c806332dd1586b747 2024-12-12T16:27:56,167 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/378708e3b44b49a08bd8e5ff2c1f15e9 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/378708e3b44b49a08bd8e5ff2c1f15e9 2024-12-12T16:27:56,168 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/641fcf0e34a34517a7aaefe8c6de162d to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/641fcf0e34a34517a7aaefe8c6de162d 2024-12-12T16:27:56,168 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/3aa264f9203b4731a2740e2776491c3d to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/3aa264f9203b4731a2740e2776491c3d 2024-12-12T16:27:56,170 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/ea46d63b05cd4850aacc321092f4481b to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/ea46d63b05cd4850aacc321092f4481b 2024-12-12T16:27:56,170 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/77951a13bd5f43be85c238002360a96d to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/77951a13bd5f43be85c238002360a96d 2024-12-12T16:27:56,170 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/009dbe2e3a75470c8f9753ae1f439152 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/009dbe2e3a75470c8f9753ae1f439152 2024-12-12T16:27:56,170 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/8fadccfbe23643508fdd1c28fbb94bfe to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/8fadccfbe23643508fdd1c28fbb94bfe 2024-12-12T16:27:56,171 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/48ebb81395004f22a81656c157e05bf8 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/48ebb81395004f22a81656c157e05bf8 2024-12-12T16:27:56,171 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/daf53761b6d2405e921bdd58dd84f6fa to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/daf53761b6d2405e921bdd58dd84f6fa 2024-12-12T16:27:56,171 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/89cbc37040bb4656aef218a03c0c8258 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/89cbc37040bb4656aef218a03c0c8258 2024-12-12T16:27:56,171 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/dc3c23d96d5b44d996cc47964c80b79b to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/dc3c23d96d5b44d996cc47964c80b79b 2024-12-12T16:27:56,173 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/a42a67ba13f1484480bab737ef1e1f7d to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/a42a67ba13f1484480bab737ef1e1f7d 2024-12-12T16:27:56,173 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/12b80e2ede894b6f80e6cd57845dff3b to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/12b80e2ede894b6f80e6cd57845dff3b 2024-12-12T16:27:56,173 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/7ceb6436c37e4cf29133a012e28235b0 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/7ceb6436c37e4cf29133a012e28235b0 2024-12-12T16:27:56,173 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/dee4597c3a7b4b55b3772a6f04cfc246 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/dee4597c3a7b4b55b3772a6f04cfc246 2024-12-12T16:27:56,173 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/ba425df7131443509c877164f8d40ae6 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/ba425df7131443509c877164f8d40ae6 2024-12-12T16:27:56,173 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/2f44a877c8e44fd3a5e2d662eb8c145b to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/2f44a877c8e44fd3a5e2d662eb8c145b 2024-12-12T16:27:56,174 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/5b444dbf4a624635b871cdace4993440 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/5b444dbf4a624635b871cdace4993440 2024-12-12T16:27:56,174 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/404965690c6348a0a60da17e48707a8d to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/404965690c6348a0a60da17e48707a8d 2024-12-12T16:27:56,178 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/recovered.edits/406.seqid, newMaxSeqId=406, maxSeqId=4 2024-12-12T16:27:56,179 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce. 
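The HFileArchiver entries above move each store file of region 044396069fa7748e35e43f97f084a6ce out of the table's data directory and into the parallel archive directory; only the prefix under the HBase root dir changes, while the table/region/family/file suffix is preserved. A minimal sketch of that path mapping, assuming plain string handling rather than HBase's internal FileSystem code, and reusing the root dir visible in these log paths:

// Illustration only: derives the archive target that the HFileArchiver lines above report,
// by swapping the "<root>/data/" prefix for "<root>/archive/data/". Not HBase internal code.
public class ArchivePathSketch {
  static String toArchivePath(String rootDir, String storeFilePath) {
    String dataPrefix = rootDir + "/data/";
    if (!storeFilePath.startsWith(dataPrefix)) {
      throw new IllegalArgumentException("not under " + dataPrefix);
    }
    // table/region/family/file stays identical; only the prefix changes
    return rootDir + "/archive/data/" + storeFilePath.substring(dataPrefix.length());
  }

  public static void main(String[] args) {
    String root = "hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc";
    String src = root + "/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/c170f1eb26df44c39eadc2404bcb7683";
    // prints the same "archive/data/..." target logged for this file above
    System.out.println(toArchivePath(root, src));
  }
}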
2024-12-12T16:27:56,179 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1635): Region close journal for 044396069fa7748e35e43f97f084a6ce: 2024-12-12T16:27:56,181 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] handler.UnassignRegionHandler(170): Closed 044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:56,181 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=044396069fa7748e35e43f97f084a6ce, regionState=CLOSED 2024-12-12T16:27:56,183 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=66, resume processing ppid=65 2024-12-12T16:27:56,183 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, ppid=65, state=SUCCESS; CloseRegionProcedure 044396069fa7748e35e43f97f084a6ce, server=4f6a4780a2f6,41933,1734020809476 in 2.2900 sec 2024-12-12T16:27:56,184 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=65, resume processing ppid=64 2024-12-12T16:27:56,184 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, ppid=64, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=044396069fa7748e35e43f97f084a6ce, UNASSIGN in 2.2930 sec 2024-12-12T16:27:56,186 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=64, resume processing ppid=63 2024-12-12T16:27:56,186 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, ppid=63, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 2.2970 sec 2024-12-12T16:27:56,187 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734020876187"}]},"ts":"1734020876187"} 2024-12-12T16:27:56,187 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-12T16:27:56,189 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-12T16:27:56,191 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 2.3100 sec 2024-12-12T16:27:57,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-12T16:27:57,988 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 63 completed 2024-12-12T16:27:57,988 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-12T16:27:57,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=67, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T16:27:57,990 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=67, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T16:27:57,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-12T16:27:57,990 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for 
pid=67, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T16:27:57,992 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:57,994 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A, FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B, FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C, FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/recovered.edits] 2024-12-12T16:27:57,998 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/f0a39d59998242fb8f8567bfbf19177d to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/f0a39d59998242fb8f8567bfbf19177d 2024-12-12T16:27:57,998 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/575d3825cd984ea3860b1170c0d54333 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/575d3825cd984ea3860b1170c0d54333 2024-12-12T16:27:57,998 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/5d52f3323e4b442690c2895a7cbc3c8d to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/5d52f3323e4b442690c2895a7cbc3c8d 2024-12-12T16:27:57,998 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/a111e914b1f84ae787a8a63675d428a3 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/A/a111e914b1f84ae787a8a63675d428a3 2024-12-12T16:27:58,001 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/4876063cd093497aa0ac8df04a334d82 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/4876063cd093497aa0ac8df04a334d82 2024-12-12T16:27:58,001 
DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/6635ba27d5494235b33d8759fa35f470 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/6635ba27d5494235b33d8759fa35f470 2024-12-12T16:27:58,002 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/3b381b119e4042fca641c26af9a9b038 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/3b381b119e4042fca641c26af9a9b038 2024-12-12T16:27:58,002 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/9c128a1396f04b0ca9fe013ac5a27f27 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/B/9c128a1396f04b0ca9fe013ac5a27f27 2024-12-12T16:27:58,005 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/154d86bc5c9e4f0ba49f6c7d8c9d7964 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/154d86bc5c9e4f0ba49f6c7d8c9d7964 2024-12-12T16:27:58,005 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/db9f1eb5a0d04c77a37f6793a2d90217 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/db9f1eb5a0d04c77a37f6793a2d90217 2024-12-12T16:27:58,005 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/65b10e07a1a447cfa232ed4ede1e7a9d to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/65b10e07a1a447cfa232ed4ede1e7a9d 2024-12-12T16:27:58,005 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/b30b1784b5e24d978baa596b493c2c0e to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/C/b30b1784b5e24d978baa596b493c2c0e 2024-12-12T16:27:58,008 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/recovered.edits/406.seqid to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce/recovered.edits/406.seqid 2024-12-12T16:27:58,009 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:58,009 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-12T16:27:58,009 DEBUG [PEWorker-3 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-12T16:27:58,010 DEBUG [PEWorker-3 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-12T16:27:58,019 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412123ef5ec7273834198af2cc7afd0510644_044396069fa7748e35e43f97f084a6ce to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412123ef5ec7273834198af2cc7afd0510644_044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:58,020 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212021ca684a1d3400b9175e4240b4270f7_044396069fa7748e35e43f97f084a6ce to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212021ca684a1d3400b9175e4240b4270f7_044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:58,021 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212181ef70cbf64411687ca392f5253d3ea_044396069fa7748e35e43f97f084a6ce to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212181ef70cbf64411687ca392f5253d3ea_044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:58,021 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212172ac6630b8f4a8dbfad8b5ea379176b_044396069fa7748e35e43f97f084a6ce to 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212172ac6630b8f4a8dbfad8b5ea379176b_044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:58,021 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212360080bb268b442194a40819fe0423a9_044396069fa7748e35e43f97f084a6ce to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212360080bb268b442194a40819fe0423a9_044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:58,021 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412126eeaaeee7584443e8326ea9ce06c8979_044396069fa7748e35e43f97f084a6ce to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412126eeaaeee7584443e8326ea9ce06c8979_044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:58,021 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412128443405ca4a14a98951cc1a185c51cee_044396069fa7748e35e43f97f084a6ce to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412128443405ca4a14a98951cc1a185c51cee_044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:58,021 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212405b69ecf65842cb8bf19fc113e8049f_044396069fa7748e35e43f97f084a6ce to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212405b69ecf65842cb8bf19fc113e8049f_044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:58,022 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212953f773adf364f268d5cc614d1024131_044396069fa7748e35e43f97f084a6ce to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212953f773adf364f268d5cc614d1024131_044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:58,022 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121297f1978ff01e475aacf61d6776c95174_044396069fa7748e35e43f97f084a6ce to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121297f1978ff01e475aacf61d6776c95174_044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:58,023 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412129adeebe9786a438d94e1014a0d773aa6_044396069fa7748e35e43f97f084a6ce to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412129adeebe9786a438d94e1014a0d773aa6_044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:58,024 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212bc71a22ab462452a8eec2b642e66d2e1_044396069fa7748e35e43f97f084a6ce to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212bc71a22ab462452a8eec2b642e66d2e1_044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:58,024 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212c614c8468a2949ddb9e87971f8b8c649_044396069fa7748e35e43f97f084a6ce to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212c614c8468a2949ddb9e87971f8b8c649_044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:58,024 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212b612f636f48647aaa26a091ec82521ba_044396069fa7748e35e43f97f084a6ce to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212b612f636f48647aaa26a091ec82521ba_044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:58,024 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212d1e9e73ec65a42f3af9ce6b8e03b42e6_044396069fa7748e35e43f97f084a6ce to 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212d1e9e73ec65a42f3af9ce6b8e03b42e6_044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:58,025 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212d5dfe804d60f47fcaf5cfdb45166e6f5_044396069fa7748e35e43f97f084a6ce to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212d5dfe804d60f47fcaf5cfdb45166e6f5_044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:58,026 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212e16a14ad85b343838ef575c0fa1c4d05_044396069fa7748e35e43f97f084a6ce to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212e16a14ad85b343838ef575c0fa1c4d05_044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:58,026 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212e613105939224c5183e0776d2b035e4d_044396069fa7748e35e43f97f084a6ce to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212e613105939224c5183e0776d2b035e4d_044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:58,026 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212f5ecc4c47b494b159c3a8af4c103b8d8_044396069fa7748e35e43f97f084a6ce to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212f5ecc4c47b494b159c3a8af4c103b8d8_044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:58,026 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212ed085d60931442efa877ad1b3c990bfa_044396069fa7748e35e43f97f084a6ce to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212ed085d60931442efa877ad1b3c990bfa_044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:58,026 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212f5a2226c34354dbe8252868dfb2b54b0_044396069fa7748e35e43f97f084a6ce to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212f5a2226c34354dbe8252868dfb2b54b0_044396069fa7748e35e43f97f084a6ce 2024-12-12T16:27:58,027 DEBUG [PEWorker-3 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-12T16:27:58,029 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=67, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T16:27:58,031 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-12T16:27:58,034 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-12T16:27:58,034 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=67, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T16:27:58,034 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 2024-12-12T16:27:58,035 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734020878034"}]},"ts":"9223372036854775807"} 2024-12-12T16:27:58,036 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-12T16:27:58,036 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 044396069fa7748e35e43f97f084a6ce, NAME => 'TestAcidGuarantees,,1734020848721.044396069fa7748e35e43f97f084a6ce.', STARTKEY => '', ENDKEY => ''}] 2024-12-12T16:27:58,036 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 
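The disable (pid=63) and delete (pid=67) procedures in this run are driven from the test client through HBaseAdmin. A minimal client-side equivalent, assuming a reachable cluster whose hbase-site.xml is on the classpath and the HBase 2.x client API:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      if (admin.isTableEnabled(table)) {
        admin.disableTable(table);   // DisableTableProcedure, as with pid=63 above
      }
      admin.deleteTable(table);      // DeleteTableProcedure; region dirs are archived, then removed
    }
  }
}

Deleting an enabled table is rejected by the master, which is why the log shows the DISABLE operation completing before the DELETE request is stored.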
2024-12-12T16:27:58,037 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734020878036"}]},"ts":"9223372036854775807"} 2024-12-12T16:27:58,038 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-12T16:27:58,043 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=67, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T16:27:58,044 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 55 msec 2024-12-12T16:27:58,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-12T16:27:58,091 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 67 completed 2024-12-12T16:27:58,101 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=247 (was 246) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-84974713_22 at /127.0.0.1:57050 [Waiting for operation #605] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_305173988_22 at /127.0.0.1:40560 [Waiting for operation #737] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) 
java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x526f2908-shared-pool-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/139f82cf-8370-d53f-65db-268928d5e290/cluster_c3fbcf95-de6c-01d4-3e6c-8a7f0857b2b0/dfs/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_305173988_22 at /127.0.0.1:57052 [Waiting for operation #615] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x526f2908-shared-pool-9 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x526f2908-shared-pool-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/139f82cf-8370-d53f-65db-268928d5e290/cluster_c3fbcf95-de6c-01d4-3e6c-8a7f0857b2b0/dfs/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x526f2908-shared-pool-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-84974713_22 at /127.0.0.1:40492 [Waiting for operation #781] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=459 (was 449) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=364 (was 318) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=7807 (was 7890) 2024-12-12T16:27:58,110 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=247, OpenFileDescriptor=459, MaxFileDescriptor=1048576, SystemLoadAverage=364, ProcessCount=11, AvailableMemoryMB=7807 2024-12-12T16:27:58,111 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
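The ResourceChecker entries compare thread count, open file descriptors, system load average, process count, and available memory before and after each test method to flag leaks; the "Potentially hanging thread" stacks above are the suspects behind the Thread=247 (was 246) delta. A rough stand-alone approximation of that accounting using standard JMX beans follows; the cast to com.sun.management.UnixOperatingSystemMXBean is an assumption that only holds on Unix-like JVMs, and JVM free heap is used here as a stand-in for the log's available-memory figure:

import java.lang.management.ManagementFactory;
import java.lang.management.OperatingSystemMXBean;
import java.lang.management.ThreadMXBean;

public class ResourceSnapshotSketch {
  public static void main(String[] args) {
    ThreadMXBean threads = ManagementFactory.getThreadMXBean();
    OperatingSystemMXBean os = ManagementFactory.getOperatingSystemMXBean();

    System.out.println("Thread=" + threads.getThreadCount());
    System.out.println("SystemLoadAverage=" + os.getSystemLoadAverage());
    System.out.println("FreeHeapMB=" + Runtime.getRuntime().freeMemory() / (1024 * 1024));

    // Open/max file descriptor counts are only exposed on Unix-like JVMs via com.sun.management.
    if (os instanceof com.sun.management.UnixOperatingSystemMXBean unixOs) {
      System.out.println("OpenFileDescriptor=" + unixOs.getOpenFileDescriptorCount());
      System.out.println("MaxFileDescriptor=" + unixOs.getMaxFileDescriptorCount());
    }
  }
}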
2024-12-12T16:27:58,111 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T16:27:58,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=68, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-12T16:27:58,113 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T16:27:58,113 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:58,113 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 68 2024-12-12T16:27:58,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-12T16:27:58,114 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T16:27:58,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742077_1253 (size=963) 2024-12-12T16:27:58,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-12T16:27:58,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-12T16:27:58,521 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc 2024-12-12T16:27:58,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742078_1254 (size=53) 2024-12-12T16:27:58,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-12T16:27:58,927 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T16:27:58,927 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing d67e4a55402e756819e4e8a994aa3c46, disabling compactions & flushes 2024-12-12T16:27:58,928 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:27:58,928 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:27:58,928 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. after waiting 0 ms 2024-12-12T16:27:58,928 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:27:58,928 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 
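The CREATE request above defines TestAcidGuarantees with three column families (A, B, C), a single version per cell, and the table-level metadata 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' that later surfaces in the CompactingMemStore and CompactionConfiguration lines. A hedged sketch of assembling an equivalent descriptor with the HBase 2.x builder API; family options other than VERSIONS are left at their defaults, so this is not byte-for-byte the descriptor logged above:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateAdaptiveTableSketch {
  public static void main(String[] args) throws Exception {
    TableDescriptorBuilder builder =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
            // table-level metadata shown in the log; selects the ADAPTIVE in-memory compaction policy
            .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
    for (String family : new String[] {"A", "B", "C"}) {
      builder.setColumnFamily(
          ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
              .setMaxVersions(1)   // VERSIONS => '1' in the descriptor above
              .build());
    }
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.createTable(builder.build()); // submits a CreateTableProcedure, as with pid=68 above
    }
  }
}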
2024-12-12T16:27:58,928 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for d67e4a55402e756819e4e8a994aa3c46: 2024-12-12T16:27:58,929 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T16:27:58,929 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1734020878929"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734020878929"}]},"ts":"1734020878929"} 2024-12-12T16:27:58,930 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-12T16:27:58,931 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T16:27:58,931 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734020878931"}]},"ts":"1734020878931"} 2024-12-12T16:27:58,932 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-12T16:27:58,935 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=69, ppid=68, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d67e4a55402e756819e4e8a994aa3c46, ASSIGN}] 2024-12-12T16:27:58,936 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=69, ppid=68, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d67e4a55402e756819e4e8a994aa3c46, ASSIGN 2024-12-12T16:27:58,936 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=69, ppid=68, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=d67e4a55402e756819e4e8a994aa3c46, ASSIGN; state=OFFLINE, location=4f6a4780a2f6,41933,1734020809476; forceNewPlan=false, retain=false 2024-12-12T16:27:59,087 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=69 updating hbase:meta row=d67e4a55402e756819e4e8a994aa3c46, regionState=OPENING, regionLocation=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:59,088 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=70, ppid=69, state=RUNNABLE; OpenRegionProcedure d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476}] 2024-12-12T16:27:59,216 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-12T16:27:59,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-12T16:27:59,239 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:59,243 INFO [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] handler.AssignRegionHandler(135): 
Open TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:27:59,243 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(7285): Opening region: {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} 2024-12-12T16:27:59,243 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees d67e4a55402e756819e4e8a994aa3c46 2024-12-12T16:27:59,243 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T16:27:59,244 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(7327): checking encryption for d67e4a55402e756819e4e8a994aa3c46 2024-12-12T16:27:59,244 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(7330): checking classloading for d67e4a55402e756819e4e8a994aa3c46 2024-12-12T16:27:59,245 INFO [StoreOpener-d67e4a55402e756819e4e8a994aa3c46-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region d67e4a55402e756819e4e8a994aa3c46 2024-12-12T16:27:59,246 INFO [StoreOpener-d67e4a55402e756819e4e8a994aa3c46-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T16:27:59,246 INFO [StoreOpener-d67e4a55402e756819e4e8a994aa3c46-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d67e4a55402e756819e4e8a994aa3c46 columnFamilyName A 2024-12-12T16:27:59,246 DEBUG [StoreOpener-d67e4a55402e756819e4e8a994aa3c46-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:59,247 INFO [StoreOpener-d67e4a55402e756819e4e8a994aa3c46-1 {}] regionserver.HStore(327): Store=d67e4a55402e756819e4e8a994aa3c46/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T16:27:59,247 INFO [StoreOpener-d67e4a55402e756819e4e8a994aa3c46-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, 
cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region d67e4a55402e756819e4e8a994aa3c46 2024-12-12T16:27:59,248 INFO [StoreOpener-d67e4a55402e756819e4e8a994aa3c46-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T16:27:59,248 INFO [StoreOpener-d67e4a55402e756819e4e8a994aa3c46-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d67e4a55402e756819e4e8a994aa3c46 columnFamilyName B 2024-12-12T16:27:59,248 DEBUG [StoreOpener-d67e4a55402e756819e4e8a994aa3c46-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:59,248 INFO [StoreOpener-d67e4a55402e756819e4e8a994aa3c46-1 {}] regionserver.HStore(327): Store=d67e4a55402e756819e4e8a994aa3c46/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T16:27:59,248 INFO [StoreOpener-d67e4a55402e756819e4e8a994aa3c46-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region d67e4a55402e756819e4e8a994aa3c46 2024-12-12T16:27:59,249 INFO [StoreOpener-d67e4a55402e756819e4e8a994aa3c46-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T16:27:59,249 INFO [StoreOpener-d67e4a55402e756819e4e8a994aa3c46-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d67e4a55402e756819e4e8a994aa3c46 columnFamilyName C 2024-12-12T16:27:59,250 DEBUG [StoreOpener-d67e4a55402e756819e4e8a994aa3c46-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:27:59,250 INFO [StoreOpener-d67e4a55402e756819e4e8a994aa3c46-1 {}] 
regionserver.HStore(327): Store=d67e4a55402e756819e4e8a994aa3c46/C, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T16:27:59,250 INFO [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:27:59,251 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46 2024-12-12T16:27:59,251 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46 2024-12-12T16:27:59,252 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-12T16:27:59,253 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(1085): writing seq id for d67e4a55402e756819e4e8a994aa3c46 2024-12-12T16:27:59,255 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T16:27:59,256 INFO [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(1102): Opened d67e4a55402e756819e4e8a994aa3c46; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72009823, jitterRate=0.07302998006343842}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-12T16:27:59,256 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(1001): Region open journal for d67e4a55402e756819e4e8a994aa3c46: 2024-12-12T16:27:59,257 INFO [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46., pid=70, masterSystemTime=1734020879239 2024-12-12T16:27:59,258 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:27:59,258 INFO [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 
2024-12-12T16:27:59,259 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=69 updating hbase:meta row=d67e4a55402e756819e4e8a994aa3c46, regionState=OPEN, openSeqNum=2, regionLocation=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:27:59,261 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=70, resume processing ppid=69 2024-12-12T16:27:59,261 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, ppid=69, state=SUCCESS; OpenRegionProcedure d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 in 171 msec 2024-12-12T16:27:59,262 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=69, resume processing ppid=68 2024-12-12T16:27:59,262 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, ppid=68, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=d67e4a55402e756819e4e8a994aa3c46, ASSIGN in 326 msec 2024-12-12T16:27:59,263 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T16:27:59,263 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734020879263"}]},"ts":"1734020879263"} 2024-12-12T16:27:59,264 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-12T16:27:59,266 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T16:27:59,267 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1550 sec 2024-12-12T16:28:00,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-12T16:28:00,218 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 68 completed 2024-12-12T16:28:00,220 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x72aa9ee5 to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@d296fed 2024-12-12T16:28:00,223 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c480dfb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:28:00,224 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:28:00,226 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55042, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:28:00,227 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-12T16:28:00,228 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56274, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-12T16:28:00,230 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4ec09297 to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@8d0caa5 2024-12-12T16:28:00,232 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34cb3991, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:28:00,234 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4dfb20f6 to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@43f04e0e 2024-12-12T16:28:00,236 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e9ae050, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:28:00,237 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x17cf7fc0 to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@560ec309 2024-12-12T16:28:00,240 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2fef31f8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:28:00,241 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x78b04266 to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5886c0f2 2024-12-12T16:28:00,244 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@eb04aeb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:28:00,245 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x088aa519 to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@66e575aa 2024-12-12T16:28:00,248 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a0e9c8f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:28:00,249 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x131ceb8f to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@d68f787 2024-12-12T16:28:00,252 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c299cfb, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:28:00,254 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5a78bf6d to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@10e6bf6a 2024-12-12T16:28:00,256 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@605827c9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:28:00,258 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x328852db to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1730a60f 2024-12-12T16:28:00,260 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3677bd4f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:28:00,261 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4b9e2976 to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@598cfed4 2024-12-12T16:28:00,265 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@521aad6f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:28:00,266 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x56e9a678 to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@68ad882f 2024-12-12T16:28:00,270 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f5b2180, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:28:00,280 DEBUG [hconnection-0x544b4ca9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:28:00,280 DEBUG [hconnection-0x10c457ad-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:28:00,280 DEBUG [hconnection-0x51cc30a9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:28:00,281 DEBUG [hconnection-0x4bd2d1a8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:28:00,281 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55048, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:28:00,281 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection 
from 172.17.0.2:55052, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:28:00,281 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55050, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:28:00,282 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55088, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:28:00,288 DEBUG [hconnection-0x44629e83-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:28:00,288 DEBUG [hconnection-0x4c11283-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:28:00,289 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55092, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:28:00,290 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55096, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:28:00,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on d67e4a55402e756819e4e8a994aa3c46 2024-12-12T16:28:00,290 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d67e4a55402e756819e4e8a994aa3c46 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T16:28:00,290 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=A 2024-12-12T16:28:00,290 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:00,290 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=B 2024-12-12T16:28:00,290 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:00,290 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=C 2024-12-12T16:28:00,290 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:00,296 DEBUG [hconnection-0xe977f31-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:28:00,300 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55110, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:28:00,305 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T16:28:00,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees 2024-12-12T16:28:00,307 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, 
table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T16:28:00,307 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T16:28:00,307 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T16:28:00,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-12T16:28:00,312 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:00,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55052 deadline: 1734020940310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:00,312 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:00,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55096 deadline: 1734020940310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:00,312 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:00,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55110 deadline: 1734020940312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:00,313 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:00,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55088 deadline: 1734020940312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:00,316 DEBUG [hconnection-0x2bb79fa6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:28:00,317 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55120, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:28:00,344 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/71a9db9776274381ba885bc1033bcbac is 50, key is test_row_0/A:col10/1734020880289/Put/seqid=0 2024-12-12T16:28:00,344 DEBUG [hconnection-0x4415f95a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:28:00,346 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55124, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:28:00,352 DEBUG [hconnection-0x27075dba-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:28:00,352 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:00,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55124 deadline: 1734020940351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:00,355 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55140, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:28:00,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742079_1255 (size=12001) 2024-12-12T16:28:00,390 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/71a9db9776274381ba885bc1033bcbac 2024-12-12T16:28:00,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-12T16:28:00,414 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:00,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55110 deadline: 1734020940413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:00,414 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:00,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55052 deadline: 1734020940413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:00,415 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:00,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55096 deadline: 1734020940414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:00,417 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:00,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55088 deadline: 1734020940415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:00,432 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/bd08930afd7e492bb1e03ba416f5dcdb is 50, key is test_row_0/B:col10/1734020880289/Put/seqid=0 2024-12-12T16:28:00,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742080_1256 (size=12001) 2024-12-12T16:28:00,454 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:00,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55124 deadline: 1734020940453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:00,460 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:00,460 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-12T16:28:00,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:00,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. as already flushing 2024-12-12T16:28:00,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:00,460 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:28:00,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:00,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:00,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-12T16:28:00,614 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:00,614 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-12T16:28:00,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:00,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. as already flushing 2024-12-12T16:28:00,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:00,615 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:00,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:00,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:00,617 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:00,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55110 deadline: 1734020940616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:00,617 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:00,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55052 deadline: 1734020940617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:00,619 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:00,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55096 deadline: 1734020940617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:00,625 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:00,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55088 deadline: 1734020940624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:00,667 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:00,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55124 deadline: 1734020940666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:00,769 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:00,770 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-12T16:28:00,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:00,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. as already flushing 2024-12-12T16:28:00,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:00,771 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:28:00,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:00,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:00,840 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/bd08930afd7e492bb1e03ba416f5dcdb 2024-12-12T16:28:00,874 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/75616725e61e412ba3997e35a8a1f610 is 50, key is test_row_0/C:col10/1734020880289/Put/seqid=0 2024-12-12T16:28:00,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742081_1257 (size=12001) 2024-12-12T16:28:00,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-12T16:28:00,920 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:00,921 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:00,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55110 deadline: 1734020940919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:00,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55096 deadline: 1734020940920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:00,924 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:00,924 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-12T16:28:00,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:00,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. as already flushing 2024-12-12T16:28:00,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:00,925 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:00,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:00,925 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:00,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55052 deadline: 1734020940925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:00,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:28:00,929 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:00,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55088 deadline: 1734020940928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:00,971 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:00,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55124 deadline: 1734020940970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:01,078 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:01,078 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-12T16:28:01,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:01,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. as already flushing 2024-12-12T16:28:01,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:01,079 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:28:01,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:01,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:01,231 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:01,231 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-12T16:28:01,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:01,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. as already flushing 2024-12-12T16:28:01,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:01,232 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:01,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:01,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:28:01,295 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/75616725e61e412ba3997e35a8a1f610 2024-12-12T16:28:01,301 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/71a9db9776274381ba885bc1033bcbac as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/71a9db9776274381ba885bc1033bcbac 2024-12-12T16:28:01,306 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/71a9db9776274381ba885bc1033bcbac, entries=150, sequenceid=12, filesize=11.7 K 2024-12-12T16:28:01,307 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/bd08930afd7e492bb1e03ba416f5dcdb as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/bd08930afd7e492bb1e03ba416f5dcdb 2024-12-12T16:28:01,313 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/bd08930afd7e492bb1e03ba416f5dcdb, entries=150, sequenceid=12, filesize=11.7 K 2024-12-12T16:28:01,314 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/75616725e61e412ba3997e35a8a1f610 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/75616725e61e412ba3997e35a8a1f610 2024-12-12T16:28:01,318 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/75616725e61e412ba3997e35a8a1f610, entries=150, sequenceid=12, filesize=11.7 K 2024-12-12T16:28:01,320 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for d67e4a55402e756819e4e8a994aa3c46 in 1030ms, sequenceid=12, compaction requested=false 2024-12-12T16:28:01,320 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-12T16:28:01,321 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d67e4a55402e756819e4e8a994aa3c46: 2024-12-12T16:28:01,384 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:01,385 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-12T16:28:01,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:01,385 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2837): Flushing d67e4a55402e756819e4e8a994aa3c46 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-12T16:28:01,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=A 2024-12-12T16:28:01,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:01,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=B 2024-12-12T16:28:01,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:01,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=C 2024-12-12T16:28:01,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:01,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/5b1d1adb837145258647a786077b9038 is 50, key is test_row_0/A:col10/1734020880305/Put/seqid=0 2024-12-12T16:28:01,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-12T16:28:01,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on d67e4a55402e756819e4e8a994aa3c46 2024-12-12T16:28:01,427 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 
as already flushing 2024-12-12T16:28:01,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742082_1258 (size=12001) 2024-12-12T16:28:01,435 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/5b1d1adb837145258647a786077b9038 2024-12-12T16:28:01,436 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:01,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55088 deadline: 1734020941433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:01,436 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:01,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55110 deadline: 1734020941434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:01,438 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:01,438 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:01,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55052 deadline: 1734020941436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:01,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55096 deadline: 1734020941436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:01,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/6495ab16f29c443f93649a24e74040a4 is 50, key is test_row_0/B:col10/1734020880305/Put/seqid=0 2024-12-12T16:28:01,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742083_1259 (size=12001) 2024-12-12T16:28:01,475 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:01,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55124 deadline: 1734020941475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:01,539 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:01,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55110 deadline: 1734020941537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:01,540 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:01,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55096 deadline: 1734020941539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:01,541 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:01,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55052 deadline: 1734020941540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:01,703 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-12T16:28:01,742 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:01,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55110 deadline: 1734020941741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:01,744 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:01,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55052 deadline: 1734020941743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:01,744 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:01,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55096 deadline: 1734020941743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:01,860 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/6495ab16f29c443f93649a24e74040a4 2024-12-12T16:28:01,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/ac26b4da84144feda68b3dc5b87cf883 is 50, key is test_row_0/C:col10/1734020880305/Put/seqid=0 2024-12-12T16:28:01,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742084_1260 (size=12001) 2024-12-12T16:28:02,044 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:02,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55110 deadline: 1734020942044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:02,047 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:02,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55052 deadline: 1734020942046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:02,047 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:02,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55096 deadline: 1734020942047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:02,273 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/ac26b4da84144feda68b3dc5b87cf883 2024-12-12T16:28:02,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/5b1d1adb837145258647a786077b9038 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/5b1d1adb837145258647a786077b9038 2024-12-12T16:28:02,284 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/5b1d1adb837145258647a786077b9038, entries=150, sequenceid=38, filesize=11.7 K 2024-12-12T16:28:02,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/6495ab16f29c443f93649a24e74040a4 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/6495ab16f29c443f93649a24e74040a4 2024-12-12T16:28:02,290 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/6495ab16f29c443f93649a24e74040a4, entries=150, sequenceid=38, filesize=11.7 K 2024-12-12T16:28:02,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/ac26b4da84144feda68b3dc5b87cf883 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/ac26b4da84144feda68b3dc5b87cf883 2024-12-12T16:28:02,295 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/ac26b4da84144feda68b3dc5b87cf883, entries=150, sequenceid=38, filesize=11.7 K 2024-12-12T16:28:02,296 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for d67e4a55402e756819e4e8a994aa3c46 in 911ms, sequenceid=38, compaction requested=false 2024-12-12T16:28:02,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2538): Flush status journal for d67e4a55402e756819e4e8a994aa3c46: 2024-12-12T16:28:02,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 
2024-12-12T16:28:02,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-12-12T16:28:02,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4106): Remote procedure done, pid=72 2024-12-12T16:28:02,298 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=72, resume processing ppid=71 2024-12-12T16:28:02,299 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, ppid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9900 sec 2024-12-12T16:28:02,300 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees in 1.9940 sec 2024-12-12T16:28:02,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-12T16:28:02,414 INFO [Thread-1178 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 71 completed 2024-12-12T16:28:02,415 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T16:28:02,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees 2024-12-12T16:28:02,417 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T16:28:02,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-12T16:28:02,418 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T16:28:02,418 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=74, ppid=73, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T16:28:02,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on d67e4a55402e756819e4e8a994aa3c46 2024-12-12T16:28:02,439 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d67e4a55402e756819e4e8a994aa3c46 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-12T16:28:02,439 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=A 2024-12-12T16:28:02,439 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:02,439 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=B 2024-12-12T16:28:02,440 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-12-12T16:28:02,440 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=C 2024-12-12T16:28:02,440 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:02,445 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/820d83d7d0b04bea9d638bc2040c1e44 is 50, key is test_row_0/A:col10/1734020881433/Put/seqid=0 2024-12-12T16:28:02,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742085_1261 (size=12001) 2024-12-12T16:28:02,451 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/820d83d7d0b04bea9d638bc2040c1e44 2024-12-12T16:28:02,472 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/4ef1bcd6c00144ec91a5ea709cbd33a5 is 50, key is test_row_0/B:col10/1734020881433/Put/seqid=0 2024-12-12T16:28:02,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742086_1262 (size=12001) 2024-12-12T16:28:02,491 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:02,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55088 deadline: 1734020942489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:02,493 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:02,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55124 deadline: 1734020942491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:02,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-12T16:28:02,548 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:02,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55110 deadline: 1734020942547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:02,552 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:02,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55096 deadline: 1734020942550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:02,554 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:02,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55052 deadline: 1734020942552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:02,569 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:02,570 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-12T16:28:02,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:02,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. as already flushing 2024-12-12T16:28:02,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:02,570 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:02,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:02,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:02,593 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:02,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55088 deadline: 1734020942592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:02,595 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:02,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55124 deadline: 1734020942594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:02,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-12T16:28:02,722 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:02,723 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-12T16:28:02,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:02,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. as already flushing 2024-12-12T16:28:02,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:02,723 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:28:02,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:02,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:02,797 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:02,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55088 deadline: 1734020942795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:02,799 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:02,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55124 deadline: 1734020942798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:02,875 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:02,876 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-12T16:28:02,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:02,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. as already flushing 2024-12-12T16:28:02,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:02,876 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:28:02,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:02,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:02,883 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/4ef1bcd6c00144ec91a5ea709cbd33a5 2024-12-12T16:28:02,892 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/6f14e45de1e04d7ebb8597edaacba141 is 50, key is test_row_0/C:col10/1734020881433/Put/seqid=0 2024-12-12T16:28:02,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742087_1263 (size=12001) 2024-12-12T16:28:02,934 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/6f14e45de1e04d7ebb8597edaacba141 2024-12-12T16:28:02,940 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/820d83d7d0b04bea9d638bc2040c1e44 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/820d83d7d0b04bea9d638bc2040c1e44 2024-12-12T16:28:02,945 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/820d83d7d0b04bea9d638bc2040c1e44, entries=150, sequenceid=50, filesize=11.7 K 2024-12-12T16:28:02,946 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/4ef1bcd6c00144ec91a5ea709cbd33a5 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/4ef1bcd6c00144ec91a5ea709cbd33a5 2024-12-12T16:28:02,950 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/4ef1bcd6c00144ec91a5ea709cbd33a5, entries=150, sequenceid=50, filesize=11.7 K 2024-12-12T16:28:02,952 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/6f14e45de1e04d7ebb8597edaacba141 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/6f14e45de1e04d7ebb8597edaacba141 
2024-12-12T16:28:02,956 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/6f14e45de1e04d7ebb8597edaacba141, entries=150, sequenceid=50, filesize=11.7 K 2024-12-12T16:28:02,957 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for d67e4a55402e756819e4e8a994aa3c46 in 518ms, sequenceid=50, compaction requested=true 2024-12-12T16:28:02,957 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d67e4a55402e756819e4e8a994aa3c46: 2024-12-12T16:28:02,957 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d67e4a55402e756819e4e8a994aa3c46:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T16:28:02,958 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:02,958 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:28:02,958 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d67e4a55402e756819e4e8a994aa3c46:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T16:28:02,958 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:02,958 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:28:02,958 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d67e4a55402e756819e4e8a994aa3c46:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T16:28:02,958 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:28:02,959 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:28:02,959 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): d67e4a55402e756819e4e8a994aa3c46/A is initiating minor compaction (all files) 2024-12-12T16:28:02,959 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d67e4a55402e756819e4e8a994aa3c46/A in TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 
2024-12-12T16:28:02,959 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:28:02,959 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/71a9db9776274381ba885bc1033bcbac, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/5b1d1adb837145258647a786077b9038, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/820d83d7d0b04bea9d638bc2040c1e44] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp, totalSize=35.2 K 2024-12-12T16:28:02,959 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): d67e4a55402e756819e4e8a994aa3c46/B is initiating minor compaction (all files) 2024-12-12T16:28:02,959 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d67e4a55402e756819e4e8a994aa3c46/B in TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:02,959 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/bd08930afd7e492bb1e03ba416f5dcdb, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/6495ab16f29c443f93649a24e74040a4, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/4ef1bcd6c00144ec91a5ea709cbd33a5] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp, totalSize=35.2 K 2024-12-12T16:28:02,959 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 71a9db9776274381ba885bc1033bcbac, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1734020880286 2024-12-12T16:28:02,960 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting bd08930afd7e492bb1e03ba416f5dcdb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1734020880286 2024-12-12T16:28:02,960 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5b1d1adb837145258647a786077b9038, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1734020880305 2024-12-12T16:28:02,960 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 6495ab16f29c443f93649a24e74040a4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1734020880305 2024-12-12T16:28:02,960 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 820d83d7d0b04bea9d638bc2040c1e44, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1734020881433 2024-12-12T16:28:02,961 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 4ef1bcd6c00144ec91a5ea709cbd33a5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1734020881433 2024-12-12T16:28:02,968 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d67e4a55402e756819e4e8a994aa3c46#A#compaction#216 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:28:02,968 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/698e99fbf5f34ce282bda410d0e60fe1 is 50, key is test_row_0/A:col10/1734020881433/Put/seqid=0 2024-12-12T16:28:02,969 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d67e4a55402e756819e4e8a994aa3c46#B#compaction#217 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:28:02,969 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/70e78de994ff44d6a7d6265d8c9c7999 is 50, key is test_row_0/B:col10/1734020881433/Put/seqid=0 2024-12-12T16:28:02,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742088_1264 (size=12104) 2024-12-12T16:28:03,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742089_1265 (size=12104) 2024-12-12T16:28:03,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-12T16:28:03,029 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:03,029 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-12T16:28:03,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 
2024-12-12T16:28:03,030 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2837): Flushing d67e4a55402e756819e4e8a994aa3c46 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-12T16:28:03,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=A 2024-12-12T16:28:03,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:03,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=B 2024-12-12T16:28:03,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:03,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=C 2024-12-12T16:28:03,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:03,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/1a4afef5f32f49ac97fb36edb4456f88 is 50, key is test_row_0/A:col10/1734020882485/Put/seqid=0 2024-12-12T16:28:03,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742090_1266 (size=12001) 2024-12-12T16:28:03,041 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/1a4afef5f32f49ac97fb36edb4456f88 2024-12-12T16:28:03,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/e84be560e6644ce1ad0a074ad8c38ad9 is 50, key is test_row_0/B:col10/1734020882485/Put/seqid=0 2024-12-12T16:28:03,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742091_1267 (size=12001) 2024-12-12T16:28:03,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on d67e4a55402e756819e4e8a994aa3c46 2024-12-12T16:28:03,103 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 
as already flushing 2024-12-12T16:28:03,120 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:03,120 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:03,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55088 deadline: 1734020943117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:03,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55124 deadline: 1734020943117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:03,222 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:03,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55124 deadline: 1734020943221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:03,223 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:03,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55088 deadline: 1734020943221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:03,396 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/698e99fbf5f34ce282bda410d0e60fe1 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/698e99fbf5f34ce282bda410d0e60fe1 2024-12-12T16:28:03,401 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d67e4a55402e756819e4e8a994aa3c46/A of d67e4a55402e756819e4e8a994aa3c46 into 698e99fbf5f34ce282bda410d0e60fe1(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T16:28:03,401 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d67e4a55402e756819e4e8a994aa3c46: 2024-12-12T16:28:03,401 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46., storeName=d67e4a55402e756819e4e8a994aa3c46/A, priority=13, startTime=1734020882957; duration=0sec 2024-12-12T16:28:03,401 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:28:03,401 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d67e4a55402e756819e4e8a994aa3c46:A 2024-12-12T16:28:03,401 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:28:03,403 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:28:03,403 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): d67e4a55402e756819e4e8a994aa3c46/C is initiating minor compaction (all files) 2024-12-12T16:28:03,403 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d67e4a55402e756819e4e8a994aa3c46/C in TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:03,403 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/75616725e61e412ba3997e35a8a1f610, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/ac26b4da84144feda68b3dc5b87cf883, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/6f14e45de1e04d7ebb8597edaacba141] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp, totalSize=35.2 K 2024-12-12T16:28:03,404 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 75616725e61e412ba3997e35a8a1f610, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1734020880286 2024-12-12T16:28:03,404 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting ac26b4da84144feda68b3dc5b87cf883, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1734020880305 2024-12-12T16:28:03,404 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6f14e45de1e04d7ebb8597edaacba141, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1734020881433 2024-12-12T16:28:03,408 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/70e78de994ff44d6a7d6265d8c9c7999 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/70e78de994ff44d6a7d6265d8c9c7999 2024-12-12T16:28:03,414 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d67e4a55402e756819e4e8a994aa3c46/B of d67e4a55402e756819e4e8a994aa3c46 into 70e78de994ff44d6a7d6265d8c9c7999(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:28:03,415 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d67e4a55402e756819e4e8a994aa3c46: 2024-12-12T16:28:03,415 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46., storeName=d67e4a55402e756819e4e8a994aa3c46/B, priority=13, startTime=1734020882958; duration=0sec 2024-12-12T16:28:03,415 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:03,415 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d67e4a55402e756819e4e8a994aa3c46:B 2024-12-12T16:28:03,423 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d67e4a55402e756819e4e8a994aa3c46#C#compaction#220 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:28:03,424 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/03b7ed85d0aa4b76b6a66574d9eef649 is 50, key is test_row_0/C:col10/1734020881433/Put/seqid=0 2024-12-12T16:28:03,425 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:03,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55088 deadline: 1734020943425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:03,425 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:03,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55124 deadline: 1734020943425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:03,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742092_1268 (size=12104) 2024-12-12T16:28:03,467 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/e84be560e6644ce1ad0a074ad8c38ad9 2024-12-12T16:28:03,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/ff8ca45548f247d1bd8c981b78bee8a9 is 50, key is test_row_0/C:col10/1734020882485/Put/seqid=0 2024-12-12T16:28:03,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742093_1269 (size=12001) 2024-12-12T16:28:03,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-12T16:28:03,553 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:03,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55110 deadline: 1734020943552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:03,555 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:03,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55096 deadline: 1734020943553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:03,558 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:03,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55052 deadline: 1734020943557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:03,728 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:03,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55088 deadline: 1734020943727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:03,729 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:03,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55124 deadline: 1734020943727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:03,845 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/03b7ed85d0aa4b76b6a66574d9eef649 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/03b7ed85d0aa4b76b6a66574d9eef649 2024-12-12T16:28:03,851 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d67e4a55402e756819e4e8a994aa3c46/C of d67e4a55402e756819e4e8a994aa3c46 into 03b7ed85d0aa4b76b6a66574d9eef649(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T16:28:03,851 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d67e4a55402e756819e4e8a994aa3c46: 2024-12-12T16:28:03,851 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46., storeName=d67e4a55402e756819e4e8a994aa3c46/C, priority=13, startTime=1734020882958; duration=0sec 2024-12-12T16:28:03,851 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:03,851 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d67e4a55402e756819e4e8a994aa3c46:C 2024-12-12T16:28:03,881 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/ff8ca45548f247d1bd8c981b78bee8a9 2024-12-12T16:28:03,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/1a4afef5f32f49ac97fb36edb4456f88 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/1a4afef5f32f49ac97fb36edb4456f88 2024-12-12T16:28:03,890 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/1a4afef5f32f49ac97fb36edb4456f88, entries=150, sequenceid=74, filesize=11.7 K 2024-12-12T16:28:03,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/e84be560e6644ce1ad0a074ad8c38ad9 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/e84be560e6644ce1ad0a074ad8c38ad9 2024-12-12T16:28:03,896 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/e84be560e6644ce1ad0a074ad8c38ad9, entries=150, sequenceid=74, filesize=11.7 K 2024-12-12T16:28:03,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/ff8ca45548f247d1bd8c981b78bee8a9 as 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/ff8ca45548f247d1bd8c981b78bee8a9 2024-12-12T16:28:03,903 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/ff8ca45548f247d1bd8c981b78bee8a9, entries=150, sequenceid=74, filesize=11.7 K 2024-12-12T16:28:03,905 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for d67e4a55402e756819e4e8a994aa3c46 in 875ms, sequenceid=74, compaction requested=false 2024-12-12T16:28:03,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2538): Flush status journal for d67e4a55402e756819e4e8a994aa3c46: 2024-12-12T16:28:03,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:03,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=74 2024-12-12T16:28:03,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4106): Remote procedure done, pid=74 2024-12-12T16:28:03,908 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=74, resume processing ppid=73 2024-12-12T16:28:03,908 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, ppid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4880 sec 2024-12-12T16:28:03,909 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees in 1.4930 sec 2024-12-12T16:28:04,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on d67e4a55402e756819e4e8a994aa3c46 2024-12-12T16:28:04,234 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d67e4a55402e756819e4e8a994aa3c46 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-12T16:28:04,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=A 2024-12-12T16:28:04,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:04,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=B 2024-12-12T16:28:04,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:04,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=C 2024-12-12T16:28:04,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:04,245 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/5cc8b925372146ec83f9c2ca14ff0650 is 50, key is test_row_0/A:col10/1734020883108/Put/seqid=0 2024-12-12T16:28:04,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742094_1270 (size=12001) 2024-12-12T16:28:04,251 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/5cc8b925372146ec83f9c2ca14ff0650 2024-12-12T16:28:04,259 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/ba98728bad2c46b0908c3ec33613d603 is 50, key is test_row_0/B:col10/1734020883108/Put/seqid=0 2024-12-12T16:28:04,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742095_1271 (size=12001) 2024-12-12T16:28:04,288 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:04,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55088 deadline: 1734020944285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:04,288 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:04,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55124 deadline: 1734020944285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:04,391 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:04,391 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:04,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55088 deadline: 1734020944389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:04,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55124 deadline: 1734020944389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:04,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-12T16:28:04,524 INFO [Thread-1178 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 73 completed 2024-12-12T16:28:04,525 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T16:28:04,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees 2024-12-12T16:28:04,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-12T16:28:04,528 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T16:28:04,528 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, 
table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T16:28:04,528 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T16:28:04,594 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:04,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55088 deadline: 1734020944592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:04,595 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:04,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55124 deadline: 1734020944593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:04,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-12T16:28:04,671 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/ba98728bad2c46b0908c3ec33613d603 2024-12-12T16:28:04,679 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/70552faf2cab4087bf0381beb58b6db0 is 50, key is test_row_0/C:col10/1734020883108/Put/seqid=0 2024-12-12T16:28:04,680 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:04,682 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-12T16:28:04,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:04,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. as already flushing 2024-12-12T16:28:04,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:04,682 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:04,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:04,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:04,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742096_1272 (size=12001) 2024-12-12T16:28:04,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-12T16:28:04,834 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:04,834 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-12T16:28:04,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:04,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. as already flushing 2024-12-12T16:28:04,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 
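The pid=76 errors above are benign in this run: FlushRegionCallable finds the region already mid-flush ("NOT flushing ... as already flushing"), reports the java.io.IOException back, and the master re-dispatches the subprocedure until the in-progress flush finishes. The table-level flush behind procId 73 and 75 ("Client=jenkins ... flush TestAcidGuarantees") corresponds to an Admin-API request; a minimal, hypothetical sketch of issuing such a flush (not taken from the test source):

    // Hypothetical illustration only; not taken from the TestAcidGuarantees source.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Submits a FlushTableProcedure on the master (the pid=73/75 entries above)
          // and blocks until it, and its per-region flush subprocedures, finish.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }

Because the call waits on the master-side procedure, the client keeps polling "Checking to see if procedure is done pid=75" while the region server retries the per-region flush.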
2024-12-12T16:28:04,835 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:04,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:04,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:04,897 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:04,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55088 deadline: 1734020944895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:04,899 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:04,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55124 deadline: 1734020944898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:04,987 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:04,987 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-12T16:28:04,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 
2024-12-12T16:28:04,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. as already flushing 2024-12-12T16:28:04,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:04,988 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:04,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:28:04,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
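The repeated RegionTooBusyException entries ("Over memstore limit=512.0 K") show writers being pushed back because the region's memstore has exceeded its blocking size, which HBase derives from hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The values this test actually uses are not visible in the log; the sketch below assumes 128 KB x 4, one combination that yields the 512 KB limit reported above.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimitExample {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed values: 128 KB flush size x 4 block multiplier = 512 KB blocking limit,
        // matching "Over memstore limit=512.0 K" in the log; the test's real settings may differ.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("Memstore blocking limit (bytes): " + blockingLimit); // 524288
      }
    }

The client treats the exception as retryable and resubmits, which is why the same connections (172.17.0.2:55088 and :55124) reappear with increasing callIds and later deadlines until the flush above drains the memstore.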
2024-12-12T16:28:05,088 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/70552faf2cab4087bf0381beb58b6db0 2024-12-12T16:28:05,096 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/5cc8b925372146ec83f9c2ca14ff0650 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/5cc8b925372146ec83f9c2ca14ff0650 2024-12-12T16:28:05,100 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/5cc8b925372146ec83f9c2ca14ff0650, entries=150, sequenceid=91, filesize=11.7 K 2024-12-12T16:28:05,101 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/ba98728bad2c46b0908c3ec33613d603 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/ba98728bad2c46b0908c3ec33613d603 2024-12-12T16:28:05,105 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/ba98728bad2c46b0908c3ec33613d603, entries=150, sequenceid=91, filesize=11.7 K 2024-12-12T16:28:05,107 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/70552faf2cab4087bf0381beb58b6db0 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/70552faf2cab4087bf0381beb58b6db0 2024-12-12T16:28:05,111 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/70552faf2cab4087bf0381beb58b6db0, entries=150, sequenceid=91, filesize=11.7 K 2024-12-12T16:28:05,112 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for d67e4a55402e756819e4e8a994aa3c46 in 878ms, sequenceid=91, compaction requested=true 2024-12-12T16:28:05,112 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d67e4a55402e756819e4e8a994aa3c46: 2024-12-12T16:28:05,112 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d67e4a55402e756819e4e8a994aa3c46:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T16:28:05,112 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 
compacting, 3 eligible, 16 blocking 2024-12-12T16:28:05,112 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:05,112 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d67e4a55402e756819e4e8a994aa3c46:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T16:28:05,112 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:05,112 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d67e4a55402e756819e4e8a994aa3c46:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T16:28:05,112 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:28:05,112 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:28:05,114 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:28:05,114 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:28:05,114 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): d67e4a55402e756819e4e8a994aa3c46/A is initiating minor compaction (all files) 2024-12-12T16:28:05,114 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): d67e4a55402e756819e4e8a994aa3c46/B is initiating minor compaction (all files) 2024-12-12T16:28:05,114 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d67e4a55402e756819e4e8a994aa3c46/A in TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:05,114 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d67e4a55402e756819e4e8a994aa3c46/B in TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 
2024-12-12T16:28:05,114 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/698e99fbf5f34ce282bda410d0e60fe1, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/1a4afef5f32f49ac97fb36edb4456f88, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/5cc8b925372146ec83f9c2ca14ff0650] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp, totalSize=35.3 K 2024-12-12T16:28:05,114 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/70e78de994ff44d6a7d6265d8c9c7999, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/e84be560e6644ce1ad0a074ad8c38ad9, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/ba98728bad2c46b0908c3ec33613d603] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp, totalSize=35.3 K 2024-12-12T16:28:05,115 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 70e78de994ff44d6a7d6265d8c9c7999, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1734020881433 2024-12-12T16:28:05,115 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 698e99fbf5f34ce282bda410d0e60fe1, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1734020881433 2024-12-12T16:28:05,115 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting e84be560e6644ce1ad0a074ad8c38ad9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1734020882485 2024-12-12T16:28:05,115 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1a4afef5f32f49ac97fb36edb4456f88, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1734020882485 2024-12-12T16:28:05,116 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting ba98728bad2c46b0908c3ec33613d603, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1734020883108 2024-12-12T16:28:05,116 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5cc8b925372146ec83f9c2ca14ff0650, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1734020883108 2024-12-12T16:28:05,124 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d67e4a55402e756819e4e8a994aa3c46#A#compaction#225 average throughput is 6.55 MB/second, slept 0 time(s) and total 
slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:28:05,125 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d67e4a55402e756819e4e8a994aa3c46#B#compaction#226 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:28:05,125 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/5d4dd3ce01f547a7bb1117d1b5e3f035 is 50, key is test_row_0/A:col10/1734020883108/Put/seqid=0 2024-12-12T16:28:05,125 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/8e504172c6104fe8bb7988430c8ed62a is 50, key is test_row_0/B:col10/1734020883108/Put/seqid=0 2024-12-12T16:28:05,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742097_1273 (size=12207) 2024-12-12T16:28:05,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-12T16:28:05,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742098_1274 (size=12207) 2024-12-12T16:28:05,136 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/5d4dd3ce01f547a7bb1117d1b5e3f035 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/5d4dd3ce01f547a7bb1117d1b5e3f035 2024-12-12T16:28:05,140 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:05,141 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-12T16:28:05,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 
2024-12-12T16:28:05,141 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2837): Flushing d67e4a55402e756819e4e8a994aa3c46 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-12T16:28:05,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=A 2024-12-12T16:28:05,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:05,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=B 2024-12-12T16:28:05,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:05,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=C 2024-12-12T16:28:05,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:05,142 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d67e4a55402e756819e4e8a994aa3c46/A of d67e4a55402e756819e4e8a994aa3c46 into 5d4dd3ce01f547a7bb1117d1b5e3f035(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
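The flush and compaction entries describe every store file as bloomtype=ROW, encoding=NONE, compression=NONE across all three families A, B, and C (and the flushes note bloomFilter=true). A hypothetical column-family layout consistent with those attributes (the test's real table descriptor is not part of this log) could be built as follows:

    // Illustrative only; the actual schema is created by the test and is not shown in this log.
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.compress.Compression;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TableLayoutSketch {
      public static void main(String[] args) {
        TableDescriptorBuilder builder =
            TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
        for (String family : new String[] { "A", "B", "C" }) {
          builder.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
              .setBloomFilterType(BloomType.ROW)              // "bloomtype=ROW" in the compaction entries
              .setCompressionType(Compression.Algorithm.NONE) // "compression=NONE" in the compaction entries
              .build());
        }
        TableDescriptor tableDescriptor = builder.build();
        System.out.println(tableDescriptor);
      }
    }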
2024-12-12T16:28:05,142 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d67e4a55402e756819e4e8a994aa3c46: 2024-12-12T16:28:05,142 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46., storeName=d67e4a55402e756819e4e8a994aa3c46/A, priority=13, startTime=1734020885112; duration=0sec 2024-12-12T16:28:05,142 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:28:05,142 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d67e4a55402e756819e4e8a994aa3c46:A 2024-12-12T16:28:05,143 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:28:05,147 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:28:05,147 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): d67e4a55402e756819e4e8a994aa3c46/C is initiating minor compaction (all files) 2024-12-12T16:28:05,147 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d67e4a55402e756819e4e8a994aa3c46/C in TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:05,148 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/03b7ed85d0aa4b76b6a66574d9eef649, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/ff8ca45548f247d1bd8c981b78bee8a9, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/70552faf2cab4087bf0381beb58b6db0] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp, totalSize=35.3 K 2024-12-12T16:28:05,148 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 03b7ed85d0aa4b76b6a66574d9eef649, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1734020881433 2024-12-12T16:28:05,148 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting ff8ca45548f247d1bd8c981b78bee8a9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1734020882485 2024-12-12T16:28:05,149 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 70552faf2cab4087bf0381beb58b6db0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1734020883108 2024-12-12T16:28:05,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/d1e52be9744e44ecb0a55e39a4304ea4 is 50, key is test_row_0/A:col10/1734020884253/Put/seqid=0 2024-12-12T16:28:05,166 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d67e4a55402e756819e4e8a994aa3c46#C#compaction#228 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:28:05,167 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/6605ac8483d34b369a9ebd3840178d75 is 50, key is test_row_0/C:col10/1734020883108/Put/seqid=0 2024-12-12T16:28:05,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742099_1275 (size=12001) 2024-12-12T16:28:05,171 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/d1e52be9744e44ecb0a55e39a4304ea4 2024-12-12T16:28:05,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/7038944329ed4b358c191a0f17e9c792 is 50, key is test_row_0/B:col10/1734020884253/Put/seqid=0 2024-12-12T16:28:05,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742100_1276 (size=12207) 2024-12-12T16:28:05,196 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/6605ac8483d34b369a9ebd3840178d75 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/6605ac8483d34b369a9ebd3840178d75 2024-12-12T16:28:05,200 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d67e4a55402e756819e4e8a994aa3c46/C of d67e4a55402e756819e4e8a994aa3c46 into 6605ac8483d34b369a9ebd3840178d75(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
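After the third flush, each store again holds three HFiles of roughly 11.7 to 11.8 K, which satisfies the minor-compaction minimum of three eligible files, so ExploringCompactionPolicy selects all of them ("3 files of size 36106") and rewrites them into a single ~11.9 K file per store. A minimal sketch of the settings that drive that selection, using commonly assumed defaults rather than values read from this test:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionSelectionSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed defaults (not read from this test's configuration):
        conf.setInt("hbase.hstore.compaction.min", 3);      // 3 flushed files make a store eligible
        conf.setInt("hbase.hstore.compaction.max", 10);     // upper bound on files per compaction pass
        conf.setInt("hbase.hstore.blockingStoreFiles", 16); // the "16 blocking" figure in the log
        System.out.println("minor compaction triggers at "
            + conf.getInt("hbase.hstore.compaction.min", 3) + " files");
      }
    }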
2024-12-12T16:28:05,201 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d67e4a55402e756819e4e8a994aa3c46: 2024-12-12T16:28:05,201 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46., storeName=d67e4a55402e756819e4e8a994aa3c46/C, priority=13, startTime=1734020885112; duration=0sec 2024-12-12T16:28:05,201 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:05,201 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d67e4a55402e756819e4e8a994aa3c46:C 2024-12-12T16:28:05,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742101_1277 (size=12001) 2024-12-12T16:28:05,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on d67e4a55402e756819e4e8a994aa3c46 2024-12-12T16:28:05,405 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. as already flushing 2024-12-12T16:28:05,419 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:05,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55124 deadline: 1734020945418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:05,420 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:05,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55088 deadline: 1734020945419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:05,522 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:05,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55124 deadline: 1734020945520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:05,523 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:05,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55088 deadline: 1734020945521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:05,536 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/8e504172c6104fe8bb7988430c8ed62a as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/8e504172c6104fe8bb7988430c8ed62a 2024-12-12T16:28:05,542 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d67e4a55402e756819e4e8a994aa3c46/B of d67e4a55402e756819e4e8a994aa3c46 into 8e504172c6104fe8bb7988430c8ed62a(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:28:05,542 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d67e4a55402e756819e4e8a994aa3c46: 2024-12-12T16:28:05,542 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46., storeName=d67e4a55402e756819e4e8a994aa3c46/B, priority=13, startTime=1734020885112; duration=0sec 2024-12-12T16:28:05,542 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:05,542 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d67e4a55402e756819e4e8a994aa3c46:B 2024-12-12T16:28:05,558 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:05,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55110 deadline: 1734020945558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:05,559 DEBUG [Thread-1172 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4126 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46., hostname=4f6a4780a2f6,41933,1734020809476, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) 
at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T16:28:05,566 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:05,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55052 deadline: 1734020945565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:05,567 DEBUG [Thread-1174 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4131 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46., hostname=4f6a4780a2f6,41933,1734020809476, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T16:28:05,573 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:05,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55096 deadline: 1734020945572, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:05,574 DEBUG [Thread-1168 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4138 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46., hostname=4f6a4780a2f6,41933,1734020809476, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T16:28:05,603 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/7038944329ed4b358c191a0f17e9c792 2024-12-12T16:28:05,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/39461245250343ad96fb91c423f3a375 is 50, key is test_row_0/C:col10/1734020884253/Put/seqid=0 2024-12-12T16:28:05,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742102_1278 (size=12001) 2024-12-12T16:28:05,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-12T16:28:05,724 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:05,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55124 deadline: 1734020945723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:05,727 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:05,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55088 deadline: 1734020945725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:06,022 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/39461245250343ad96fb91c423f3a375 2024-12-12T16:28:06,026 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:06,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/d1e52be9744e44ecb0a55e39a4304ea4 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/d1e52be9744e44ecb0a55e39a4304ea4 2024-12-12T16:28:06,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55124 deadline: 1734020946026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:06,031 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/d1e52be9744e44ecb0a55e39a4304ea4, entries=150, sequenceid=114, filesize=11.7 K 2024-12-12T16:28:06,031 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:06,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55088 deadline: 1734020946030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:06,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/7038944329ed4b358c191a0f17e9c792 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/7038944329ed4b358c191a0f17e9c792 2024-12-12T16:28:06,036 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/7038944329ed4b358c191a0f17e9c792, entries=150, sequenceid=114, filesize=11.7 K 2024-12-12T16:28:06,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/39461245250343ad96fb91c423f3a375 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/39461245250343ad96fb91c423f3a375 2024-12-12T16:28:06,040 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/39461245250343ad96fb91c423f3a375, entries=150, sequenceid=114, filesize=11.7 K 2024-12-12T16:28:06,042 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for d67e4a55402e756819e4e8a994aa3c46 in 900ms, sequenceid=114, compaction requested=false 2024-12-12T16:28:06,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2538): Flush status journal for d67e4a55402e756819e4e8a994aa3c46: 2024-12-12T16:28:06,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing 
region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:06,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=76 2024-12-12T16:28:06,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4106): Remote procedure done, pid=76 2024-12-12T16:28:06,045 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=76, resume processing ppid=75 2024-12-12T16:28:06,045 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5150 sec 2024-12-12T16:28:06,046 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees in 1.5200 sec 2024-12-12T16:28:06,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on d67e4a55402e756819e4e8a994aa3c46 2024-12-12T16:28:06,533 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d67e4a55402e756819e4e8a994aa3c46 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-12T16:28:06,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=A 2024-12-12T16:28:06,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:06,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=B 2024-12-12T16:28:06,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:06,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=C 2024-12-12T16:28:06,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:06,538 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/c7931ad908e74f3e95051c8ba9b7f8e3 is 50, key is test_row_0/A:col10/1734020886531/Put/seqid=0 2024-12-12T16:28:06,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742103_1279 (size=12051) 2024-12-12T16:28:06,564 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:06,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55124 deadline: 1734020946561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:06,565 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:06,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55088 deadline: 1734020946563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:06,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-12T16:28:06,632 INFO [Thread-1178 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 75 completed 2024-12-12T16:28:06,633 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T16:28:06,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees 2024-12-12T16:28:06,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-12T16:28:06,635 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T16:28:06,635 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T16:28:06,636 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=77, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T16:28:06,667 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:06,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55124 deadline: 1734020946666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:06,668 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:06,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55088 deadline: 1734020946667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:06,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-12T16:28:06,788 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:06,788 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-12T16:28:06,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:06,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. as already flushing 2024-12-12T16:28:06,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:06,789 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:28:06,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:06,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:06,869 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:06,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55124 deadline: 1734020946868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:06,870 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:06,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55088 deadline: 1734020946869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:06,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-12T16:28:06,941 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:06,942 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-12T16:28:06,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:06,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. as already flushing 2024-12-12T16:28:06,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:06,942 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:28:06,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:06,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:06,963 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/c7931ad908e74f3e95051c8ba9b7f8e3 2024-12-12T16:28:06,970 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/720b31a1b9c2426aa6abf73bcefb2521 is 50, key is test_row_0/B:col10/1734020886531/Put/seqid=0 2024-12-12T16:28:06,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742104_1280 (size=12051) 2024-12-12T16:28:07,094 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:07,095 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-12T16:28:07,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:07,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. as already flushing 2024-12-12T16:28:07,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:07,095 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:28:07,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:07,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:07,172 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:07,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55124 deadline: 1734020947170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:07,175 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:07,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55088 deadline: 1734020947173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:07,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-12T16:28:07,247 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:07,248 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-12T16:28:07,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:07,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. as already flushing 2024-12-12T16:28:07,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:07,248 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:28:07,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:07,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:07,375 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/720b31a1b9c2426aa6abf73bcefb2521 2024-12-12T16:28:07,386 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/d7aaf0fa4a54429c88210a4774bedfd0 is 50, key is test_row_0/C:col10/1734020886531/Put/seqid=0 2024-12-12T16:28:07,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742105_1281 (size=12051) 2024-12-12T16:28:07,390 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/d7aaf0fa4a54429c88210a4774bedfd0 2024-12-12T16:28:07,395 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/c7931ad908e74f3e95051c8ba9b7f8e3 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/c7931ad908e74f3e95051c8ba9b7f8e3 2024-12-12T16:28:07,401 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:07,401 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-12T16:28:07,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:07,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. as already flushing 2024-12-12T16:28:07,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:07,402 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:07,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:07,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:07,407 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/c7931ad908e74f3e95051c8ba9b7f8e3, entries=150, sequenceid=131, filesize=11.8 K 2024-12-12T16:28:07,408 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/720b31a1b9c2426aa6abf73bcefb2521 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/720b31a1b9c2426aa6abf73bcefb2521 2024-12-12T16:28:07,412 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/720b31a1b9c2426aa6abf73bcefb2521, entries=150, sequenceid=131, filesize=11.8 K 2024-12-12T16:28:07,413 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/d7aaf0fa4a54429c88210a4774bedfd0 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/d7aaf0fa4a54429c88210a4774bedfd0 2024-12-12T16:28:07,419 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/d7aaf0fa4a54429c88210a4774bedfd0, entries=150, sequenceid=131, filesize=11.8 K 
2024-12-12T16:28:07,420 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for d67e4a55402e756819e4e8a994aa3c46 in 887ms, sequenceid=131, compaction requested=true 2024-12-12T16:28:07,420 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d67e4a55402e756819e4e8a994aa3c46: 2024-12-12T16:28:07,420 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d67e4a55402e756819e4e8a994aa3c46:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T16:28:07,420 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:07,420 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:28:07,420 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d67e4a55402e756819e4e8a994aa3c46:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T16:28:07,420 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:28:07,420 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d67e4a55402e756819e4e8a994aa3c46:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T16:28:07,420 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:28:07,420 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:28:07,422 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:28:07,422 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): d67e4a55402e756819e4e8a994aa3c46/B is initiating minor compaction (all files) 2024-12-12T16:28:07,422 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d67e4a55402e756819e4e8a994aa3c46/B in TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 
2024-12-12T16:28:07,422 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/8e504172c6104fe8bb7988430c8ed62a, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/7038944329ed4b358c191a0f17e9c792, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/720b31a1b9c2426aa6abf73bcefb2521] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp, totalSize=35.4 K
2024-12-12T16:28:07,422 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-12T16:28:07,422 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): d67e4a55402e756819e4e8a994aa3c46/A is initiating minor compaction (all files)
2024-12-12T16:28:07,422 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d67e4a55402e756819e4e8a994aa3c46/A in TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.
2024-12-12T16:28:07,423 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/5d4dd3ce01f547a7bb1117d1b5e3f035, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/d1e52be9744e44ecb0a55e39a4304ea4, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/c7931ad908e74f3e95051c8ba9b7f8e3] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp, totalSize=35.4 K
2024-12-12T16:28:07,423 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 8e504172c6104fe8bb7988430c8ed62a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1734020883108
2024-12-12T16:28:07,424 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5d4dd3ce01f547a7bb1117d1b5e3f035, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1734020883108
2024-12-12T16:28:07,424 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 7038944329ed4b358c191a0f17e9c792, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1734020884253
2024-12-12T16:28:07,424 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting d1e52be9744e44ecb0a55e39a4304ea4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1734020884253
2024-12-12T16:28:07,424 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 720b31a1b9c2426aa6abf73bcefb2521, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1734020885410
2024-12-12T16:28:07,425 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting c7931ad908e74f3e95051c8ba9b7f8e3, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1734020885410
2024-12-12T16:28:07,442 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d67e4a55402e756819e4e8a994aa3c46#B#compaction#234 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-12T16:28:07,443 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/aac403d475804d0188c321599707f5ef is 50, key is test_row_0/B:col10/1734020886531/Put/seqid=0
2024-12-12T16:28:07,446 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d67e4a55402e756819e4e8a994aa3c46#A#compaction#235 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-12T16:28:07,446 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/8ce4082439934c2f91ad2e3ea9786417 is 50, key is test_row_0/A:col10/1734020886531/Put/seqid=0
2024-12-12T16:28:07,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742106_1282 (size=12359)
2024-12-12T16:28:07,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742107_1283 (size=12359)
2024-12-12T16:28:07,461 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/aac403d475804d0188c321599707f5ef as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/aac403d475804d0188c321599707f5ef
2024-12-12T16:28:07,467 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d67e4a55402e756819e4e8a994aa3c46/B of d67e4a55402e756819e4e8a994aa3c46 into aac403d475804d0188c321599707f5ef(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-12T16:28:07,467 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d67e4a55402e756819e4e8a994aa3c46: 2024-12-12T16:28:07,468 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46., storeName=d67e4a55402e756819e4e8a994aa3c46/B, priority=13, startTime=1734020887420; duration=0sec 2024-12-12T16:28:07,468 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:28:07,468 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d67e4a55402e756819e4e8a994aa3c46:B 2024-12-12T16:28:07,468 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:28:07,470 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:28:07,470 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): d67e4a55402e756819e4e8a994aa3c46/C is initiating minor compaction (all files) 2024-12-12T16:28:07,470 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d67e4a55402e756819e4e8a994aa3c46/C in TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:07,470 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/6605ac8483d34b369a9ebd3840178d75, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/39461245250343ad96fb91c423f3a375, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/d7aaf0fa4a54429c88210a4774bedfd0] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp, totalSize=35.4 K 2024-12-12T16:28:07,471 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 6605ac8483d34b369a9ebd3840178d75, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1734020883108 2024-12-12T16:28:07,471 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 39461245250343ad96fb91c423f3a375, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1734020884253 2024-12-12T16:28:07,472 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting d7aaf0fa4a54429c88210a4774bedfd0, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1734020885410 2024-12-12T16:28:07,512 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
d67e4a55402e756819e4e8a994aa3c46#C#compaction#236 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:28:07,513 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/65fc487e454d469abfec1aad0993d1ff is 50, key is test_row_0/C:col10/1734020886531/Put/seqid=0 2024-12-12T16:28:07,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742108_1284 (size=12359) 2024-12-12T16:28:07,533 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/65fc487e454d469abfec1aad0993d1ff as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/65fc487e454d469abfec1aad0993d1ff 2024-12-12T16:28:07,539 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d67e4a55402e756819e4e8a994aa3c46/C of d67e4a55402e756819e4e8a994aa3c46 into 65fc487e454d469abfec1aad0993d1ff(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:28:07,539 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d67e4a55402e756819e4e8a994aa3c46: 2024-12-12T16:28:07,539 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46., storeName=d67e4a55402e756819e4e8a994aa3c46/C, priority=13, startTime=1734020887420; duration=0sec 2024-12-12T16:28:07,539 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:07,539 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d67e4a55402e756819e4e8a994aa3c46:C 2024-12-12T16:28:07,554 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:07,555 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-12T16:28:07,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 
2024-12-12T16:28:07,555 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2837): Flushing d67e4a55402e756819e4e8a994aa3c46 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-12T16:28:07,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=A 2024-12-12T16:28:07,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:07,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=B 2024-12-12T16:28:07,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:07,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=C 2024-12-12T16:28:07,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:07,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/c52b5a51f1e9434dab076c2de9466afc is 50, key is test_row_0/A:col10/1734020886561/Put/seqid=0 2024-12-12T16:28:07,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742109_1285 (size=12151) 2024-12-12T16:28:07,580 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/c52b5a51f1e9434dab076c2de9466afc 2024-12-12T16:28:07,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/222990dfb4f240daae23b501b70c4a3f is 50, key is test_row_0/B:col10/1734020886561/Put/seqid=0 2024-12-12T16:28:07,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742110_1286 (size=12151) 2024-12-12T16:28:07,615 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=154 (bloomFilter=true), 
to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/222990dfb4f240daae23b501b70c4a3f 2024-12-12T16:28:07,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/87d7fb1636b64489a9801da8871ec36b is 50, key is test_row_0/C:col10/1734020886561/Put/seqid=0 2024-12-12T16:28:07,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742111_1287 (size=12151) 2024-12-12T16:28:07,651 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/87d7fb1636b64489a9801da8871ec36b 2024-12-12T16:28:07,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/c52b5a51f1e9434dab076c2de9466afc as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/c52b5a51f1e9434dab076c2de9466afc 2024-12-12T16:28:07,664 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/c52b5a51f1e9434dab076c2de9466afc, entries=150, sequenceid=154, filesize=11.9 K 2024-12-12T16:28:07,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/222990dfb4f240daae23b501b70c4a3f as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/222990dfb4f240daae23b501b70c4a3f 2024-12-12T16:28:07,670 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/222990dfb4f240daae23b501b70c4a3f, entries=150, sequenceid=154, filesize=11.9 K 2024-12-12T16:28:07,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/87d7fb1636b64489a9801da8871ec36b as 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/87d7fb1636b64489a9801da8871ec36b 2024-12-12T16:28:07,678 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/87d7fb1636b64489a9801da8871ec36b, entries=150, sequenceid=154, filesize=11.9 K 2024-12-12T16:28:07,680 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=0 B/0 for d67e4a55402e756819e4e8a994aa3c46 in 125ms, sequenceid=154, compaction requested=false 2024-12-12T16:28:07,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2538): Flush status journal for d67e4a55402e756819e4e8a994aa3c46: 2024-12-12T16:28:07,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:07,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=78 2024-12-12T16:28:07,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4106): Remote procedure done, pid=78 2024-12-12T16:28:07,684 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=77 2024-12-12T16:28:07,684 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0470 sec 2024-12-12T16:28:07,689 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees in 1.0530 sec 2024-12-12T16:28:07,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on d67e4a55402e756819e4e8a994aa3c46 2024-12-12T16:28:07,693 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d67e4a55402e756819e4e8a994aa3c46 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T16:28:07,693 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=A 2024-12-12T16:28:07,693 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:07,693 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=B 2024-12-12T16:28:07,693 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:07,693 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=C 2024-12-12T16:28:07,693 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:07,714 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/69eef87ce6a845d3872f1f7f5e66fd8b is 50, key is test_row_0/A:col10/1734020887692/Put/seqid=0 2024-12-12T16:28:07,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-12T16:28:07,738 INFO [Thread-1178 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 77 completed 2024-12-12T16:28:07,740 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T16:28:07,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees 2024-12-12T16:28:07,742 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T16:28:07,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-12T16:28:07,743 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T16:28:07,743 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T16:28:07,750 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:07,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55124 deadline: 1734020947747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:07,751 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:07,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55088 deadline: 1734020947748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:07,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742112_1288 (size=12151) 2024-12-12T16:28:07,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-12T16:28:07,853 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:07,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55124 deadline: 1734020947851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:07,854 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:07,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55088 deadline: 1734020947852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:07,869 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/8ce4082439934c2f91ad2e3ea9786417 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/8ce4082439934c2f91ad2e3ea9786417 2024-12-12T16:28:07,876 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d67e4a55402e756819e4e8a994aa3c46/A of d67e4a55402e756819e4e8a994aa3c46 into 8ce4082439934c2f91ad2e3ea9786417(size=12.1 K), total size for store is 23.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:28:07,876 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d67e4a55402e756819e4e8a994aa3c46: 2024-12-12T16:28:07,876 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46., storeName=d67e4a55402e756819e4e8a994aa3c46/A, priority=13, startTime=1734020887420; duration=0sec 2024-12-12T16:28:07,876 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:07,876 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d67e4a55402e756819e4e8a994aa3c46:A 2024-12-12T16:28:07,895 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:07,896 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-12T16:28:07,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 
2024-12-12T16:28:07,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. as already flushing 2024-12-12T16:28:07,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:07,896 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:07,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:28:07,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:08,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-12T16:28:08,049 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:08,050 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-12T16:28:08,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 
2024-12-12T16:28:08,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. as already flushing 2024-12-12T16:28:08,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:08,050 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:08,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:28:08,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:08,056 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:08,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55124 deadline: 1734020948056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:08,058 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:08,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55088 deadline: 1734020948057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:08,158 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=165 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/69eef87ce6a845d3872f1f7f5e66fd8b 2024-12-12T16:28:08,166 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/ad66f8b7b0e8417696e98c525ce4552f is 50, key is test_row_0/B:col10/1734020887692/Put/seqid=0 2024-12-12T16:28:08,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742113_1289 (size=12151) 2024-12-12T16:28:08,202 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:08,203 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-12T16:28:08,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:08,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. as already flushing 2024-12-12T16:28:08,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:08,203 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:08,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:08,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:08,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-12T16:28:08,355 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:08,356 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-12T16:28:08,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:08,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. as already flushing 2024-12-12T16:28:08,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:08,356 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:08,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:08,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:08,360 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:08,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55124 deadline: 1734020948358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:08,360 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:08,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55088 deadline: 1734020948360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:08,509 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:08,509 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-12T16:28:08,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 
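A minimal client-side sketch (not part of this test run; the table name and column family are taken from the log above, the retry settings are illustrative) of how a put against this region would surface the RegionTooBusyException seen here once the client's retry budget is exhausted:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class TooBusyPutSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.client.retries.number", 5); // illustrative: fail fast instead of the default budget
    conf.setInt("hbase.client.pause", 100);        // illustrative: 100 ms base retry pause
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      try {
        table.put(put);
      } catch (IOException e) {
        // A RegionTooBusyException (wrapped in the client's retries-exhausted exception)
        // ends up here if the blocked memstore does not drain within the retry budget.
        System.err.println("put failed after retries: " + e);
      }
    }
  }
}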
2024-12-12T16:28:08,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. as already flushing 2024-12-12T16:28:08,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:08,510 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:08,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:28:08,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
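The repeating pid=80 pattern above (dispatch FlushRegionCallable, "NOT flushing ... as already flushing", IOException "Unable to complete flush", master retry) is the master-side flush procedure waiting out a flush that is already in progress on the region. A sketch of the kind of call that drives such a procedure (an assumed trigger; the actual test code is not shown in this log):

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTriggerSketch {
  public static void main(String[] args) throws IOException {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Blocks until the master-side flush procedure finishes. While the region is still
      // busy with its previous flush, the remote callable fails and is re-dispatched,
      // which is exactly the repeating "Unable to complete flush ... pid=80" loop above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}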
2024-12-12T16:28:08,578 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=165 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/ad66f8b7b0e8417696e98c525ce4552f 2024-12-12T16:28:08,585 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/8ec790e2cdb84b3eaa4ab69c0bd8193c is 50, key is test_row_0/C:col10/1734020887692/Put/seqid=0 2024-12-12T16:28:08,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742114_1290 (size=12151) 2024-12-12T16:28:08,661 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:08,661 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-12T16:28:08,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:08,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. as already flushing 2024-12-12T16:28:08,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:08,662 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:28:08,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:08,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:08,814 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:08,814 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-12T16:28:08,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:08,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. as already flushing 2024-12-12T16:28:08,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:08,815 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:08,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:08,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:08,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-12T16:28:08,864 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:08,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55088 deadline: 1734020948863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:08,867 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:08,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55124 deadline: 1734020948865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:08,967 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:08,967 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-12T16:28:08,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:08,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. as already flushing 2024-12-12T16:28:08,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:08,968 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
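The 512.0 K figure in the RegionTooBusyException messages is the region's blocking memstore size, i.e. the per-region flush size multiplied by the block multiplier. An illustrative sketch of settings that would produce that limit (assumed values; the configuration actually used by this test is not shown in the log):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // assumed: 128 KB per-region flush trigger
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // default multiplier
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    // 128 KB * 4 = 512 KB, the limit checkResources() enforces in the traces above.
    System.out.println("blocking memstore limit = " + (blockingLimit / 1024) + " KB");
  }
}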
2024-12-12T16:28:08,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:08,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:08,989 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=165 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/8ec790e2cdb84b3eaa4ab69c0bd8193c 2024-12-12T16:28:08,994 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/69eef87ce6a845d3872f1f7f5e66fd8b as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/69eef87ce6a845d3872f1f7f5e66fd8b 2024-12-12T16:28:08,998 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/69eef87ce6a845d3872f1f7f5e66fd8b, entries=150, sequenceid=165, filesize=11.9 K 2024-12-12T16:28:08,999 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/ad66f8b7b0e8417696e98c525ce4552f as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/ad66f8b7b0e8417696e98c525ce4552f 2024-12-12T16:28:09,003 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/ad66f8b7b0e8417696e98c525ce4552f, entries=150, sequenceid=165, filesize=11.9 K 2024-12-12T16:28:09,004 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/8ec790e2cdb84b3eaa4ab69c0bd8193c as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/8ec790e2cdb84b3eaa4ab69c0bd8193c 2024-12-12T16:28:09,008 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/8ec790e2cdb84b3eaa4ab69c0bd8193c, entries=150, sequenceid=165, filesize=11.9 K 2024-12-12T16:28:09,009 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for d67e4a55402e756819e4e8a994aa3c46 in 1316ms, sequenceid=165, compaction requested=true 2024-12-12T16:28:09,009 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d67e4a55402e756819e4e8a994aa3c46: 2024-12-12T16:28:09,009 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
d67e4a55402e756819e4e8a994aa3c46:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T16:28:09,009 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:09,009 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d67e4a55402e756819e4e8a994aa3c46:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T16:28:09,009 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:28:09,009 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:09,009 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d67e4a55402e756819e4e8a994aa3c46:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T16:28:09,009 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:28:09,009 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:28:09,010 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36661 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:28:09,010 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36661 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:28:09,010 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): d67e4a55402e756819e4e8a994aa3c46/A is initiating minor compaction (all files) 2024-12-12T16:28:09,011 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): d67e4a55402e756819e4e8a994aa3c46/B is initiating minor compaction (all files) 2024-12-12T16:28:09,011 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d67e4a55402e756819e4e8a994aa3c46/A in TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 
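The "Finished flush of dataSize ~53.67 KB/54960" accounting above is simply the three column families (A, B, C) each flushing 17.89 KB, as the per-store DefaultStoreFlusher lines report. A quick consistency check of those numbers as read off the log:

public class FlushAccountingSketch {
  public static void main(String[] args) {
    long totalBytes = 54960;               // dataSize reported by "Finished flush"
    long perStoreBytes = totalBytes / 3;   // 18,320 bytes for each of stores A, B and C
    System.out.printf("per-store: %.2f KB, total: %.2f KB%n",
        perStoreBytes / 1024.0, totalBytes / 1024.0); // 17.89 KB and 53.67 KB
  }
}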
2024-12-12T16:28:09,011 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/8ce4082439934c2f91ad2e3ea9786417, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/c52b5a51f1e9434dab076c2de9466afc, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/69eef87ce6a845d3872f1f7f5e66fd8b] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp, totalSize=35.8 K 2024-12-12T16:28:09,011 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d67e4a55402e756819e4e8a994aa3c46/B in TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:09,011 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/aac403d475804d0188c321599707f5ef, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/222990dfb4f240daae23b501b70c4a3f, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/ad66f8b7b0e8417696e98c525ce4552f] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp, totalSize=35.8 K 2024-12-12T16:28:09,011 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8ce4082439934c2f91ad2e3ea9786417, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1734020885410 2024-12-12T16:28:09,011 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting aac403d475804d0188c321599707f5ef, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1734020885410 2024-12-12T16:28:09,011 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting c52b5a51f1e9434dab076c2de9466afc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1734020886553 2024-12-12T16:28:09,011 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 222990dfb4f240daae23b501b70c4a3f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1734020886553 2024-12-12T16:28:09,012 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting ad66f8b7b0e8417696e98c525ce4552f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=165, earliestPutTs=1734020887686 2024-12-12T16:28:09,012 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 69eef87ce6a845d3872f1f7f5e66fd8b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=165, earliestPutTs=1734020887686 
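The compaction selection above (3 store files, 36661 bytes ≈ 12.1 K + 11.9 K + 11.9 K = 35.8 K) passes the exploring policy's ratio test: every candidate file must be no larger than the compaction ratio times the combined size of the other files in the set. A simplified sketch of that check under assumed defaults (ratio 1.2, minimum of 3 files); this restates the core condition and is not the actual HBase implementation:

import java.util.List;

public class CompactionRatioSketch {
  // True if every file is <= ratio * (sum of the other files), the condition that lets
  // a permutation of store files be selected for minor compaction.
  static boolean withinRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > ratio * (total - size)) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Approximate sizes of the three flush files selected above (totalling 36661 bytes).
    List<Long> sizes = List.of(12359L, 12151L, 12151L);
    System.out.println("selectable: " + withinRatio(sizes, 1.2)); // true -> compact all three
  }
}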
2024-12-12T16:28:09,021 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d67e4a55402e756819e4e8a994aa3c46#B#compaction#243 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:28:09,022 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/0fc3ed8d1ada478cb4ee43df814a8d8c is 50, key is test_row_0/B:col10/1734020887692/Put/seqid=0 2024-12-12T16:28:09,030 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d67e4a55402e756819e4e8a994aa3c46#A#compaction#244 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:28:09,030 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/f85013b9c2364097b7cf41dcb7cd591e is 50, key is test_row_0/A:col10/1734020887692/Put/seqid=0 2024-12-12T16:28:09,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742116_1292 (size=12561) 2024-12-12T16:28:09,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742115_1291 (size=12561) 2024-12-12T16:28:09,120 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:09,121 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-12T16:28:09,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 
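The PressureAwareThroughputController lines above report these small compactions at about 3.28 MB/second against a 50.00 MB/second limit, so no throttling sleeps were needed ("slept 0 time(s)"). A back-of-envelope check, assuming the reported figure is simply bytes written over elapsed write time (an assumption; the controller's exact accounting is not shown here):

public class ThroughputSketch {
  public static void main(String[] args) {
    double bytesWritten = 12561;   // size of one compacted HFile from the block report above
    double mbPerSecond = 3.28;     // average throughput reported by the controller
    double seconds = bytesWritten / (mbPerSecond * 1024 * 1024);
    // Roughly 3.7 ms of writing, far under the 50 MB/s cap, hence no throttling.
    System.out.printf("approx write time: %.1f ms%n", seconds * 1000);
  }
}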
2024-12-12T16:28:09,121 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2837): Flushing d67e4a55402e756819e4e8a994aa3c46 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-12T16:28:09,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=A 2024-12-12T16:28:09,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:09,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=B 2024-12-12T16:28:09,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:09,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=C 2024-12-12T16:28:09,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:09,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/c77337b0112d48cf947e58271be2a8b0 is 50, key is test_row_0/A:col10/1734020887747/Put/seqid=0 2024-12-12T16:28:09,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742117_1293 (size=12151) 2024-12-12T16:28:09,216 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-12T16:28:09,216 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-12-12T16:28:09,457 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/f85013b9c2364097b7cf41dcb7cd591e as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/f85013b9c2364097b7cf41dcb7cd591e 2024-12-12T16:28:09,464 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/0fc3ed8d1ada478cb4ee43df814a8d8c as 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/0fc3ed8d1ada478cb4ee43df814a8d8c 2024-12-12T16:28:09,467 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d67e4a55402e756819e4e8a994aa3c46/A of d67e4a55402e756819e4e8a994aa3c46 into f85013b9c2364097b7cf41dcb7cd591e(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:28:09,467 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d67e4a55402e756819e4e8a994aa3c46: 2024-12-12T16:28:09,467 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46., storeName=d67e4a55402e756819e4e8a994aa3c46/A, priority=13, startTime=1734020889009; duration=0sec 2024-12-12T16:28:09,467 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:28:09,467 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d67e4a55402e756819e4e8a994aa3c46:A 2024-12-12T16:28:09,467 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:28:09,468 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36661 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:28:09,468 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): d67e4a55402e756819e4e8a994aa3c46/C is initiating minor compaction (all files) 2024-12-12T16:28:09,468 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d67e4a55402e756819e4e8a994aa3c46/C in TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 
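Each minor compaction above folds three ~12 K flush files (35.8 K total) into a single ~12.3 K file because the three flushes cover the same ~150 test rows, and compaction keeps only the newest permitted version of each cell (assuming a single-version column family, which this log does not state). A toy illustration of that per-cell version trimming, not HBase code:

import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class VersionTrimSketch {
  record Cell(String row, String qualifier, long timestamp, String value) {}

  // Keep only the newest cell per (row, qualifier), the effect that shrinks the output file.
  static Map<String, Cell> keepNewest(List<Cell> cells) {
    Map<String, Cell> newest = new HashMap<>();
    for (Cell c : cells) {
      newest.merge(c.row() + "/" + c.qualifier(), c,
          (a, b) -> a.timestamp() >= b.timestamp() ? a : b);
    }
    return newest;
  }

  public static void main(String[] args) {
    List<Cell> cells = List.of(
        new Cell("test_row_0", "col10", 1734020885410L, "v1"),
        new Cell("test_row_0", "col10", 1734020886553L, "v2"),
        new Cell("test_row_0", "col10", 1734020887692L, "v3")); // same cell written by three flushes
    System.out.println(keepNewest(cells).size() + " cell(s) survive compaction"); // 1
  }
}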
2024-12-12T16:28:09,468 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/65fc487e454d469abfec1aad0993d1ff, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/87d7fb1636b64489a9801da8871ec36b, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/8ec790e2cdb84b3eaa4ab69c0bd8193c] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp, totalSize=35.8 K 2024-12-12T16:28:09,469 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 65fc487e454d469abfec1aad0993d1ff, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1734020885410 2024-12-12T16:28:09,469 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 87d7fb1636b64489a9801da8871ec36b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1734020886553 2024-12-12T16:28:09,469 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8ec790e2cdb84b3eaa4ab69c0bd8193c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=165, earliestPutTs=1734020887686 2024-12-12T16:28:09,471 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d67e4a55402e756819e4e8a994aa3c46/B of d67e4a55402e756819e4e8a994aa3c46 into 0fc3ed8d1ada478cb4ee43df814a8d8c(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:28:09,471 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d67e4a55402e756819e4e8a994aa3c46: 2024-12-12T16:28:09,471 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46., storeName=d67e4a55402e756819e4e8a994aa3c46/B, priority=13, startTime=1734020889009; duration=0sec 2024-12-12T16:28:09,472 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:09,472 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d67e4a55402e756819e4e8a994aa3c46:B 2024-12-12T16:28:09,477 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d67e4a55402e756819e4e8a994aa3c46#C#compaction#246 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:28:09,477 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/10b0d8cb9ae749f49fa2f95d4124356e is 50, key is test_row_0/C:col10/1734020887692/Put/seqid=0 2024-12-12T16:28:09,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742118_1294 (size=12561) 2024-12-12T16:28:09,536 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=191 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/c77337b0112d48cf947e58271be2a8b0 2024-12-12T16:28:09,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/267033eb2f324537b709b3d1723b820f is 50, key is test_row_0/B:col10/1734020887747/Put/seqid=0 2024-12-12T16:28:09,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742119_1295 (size=12151) 2024-12-12T16:28:09,551 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=191 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/267033eb2f324537b709b3d1723b820f 2024-12-12T16:28:09,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/aaf44d78aa4f429c9d42bf21def57e38 is 50, key is test_row_0/C:col10/1734020887747/Put/seqid=0 2024-12-12T16:28:09,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742120_1296 (size=12151) 2024-12-12T16:28:09,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on d67e4a55402e756819e4e8a994aa3c46 2024-12-12T16:28:09,566 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. as already flushing 2024-12-12T16:28:09,583 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:09,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55110 deadline: 1734020949581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:09,583 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:09,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55096 deadline: 1734020949583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:09,584 DEBUG [Thread-1168 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8148 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46., hostname=4f6a4780a2f6,41933,1734020809476, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T16:28:09,595 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:09,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55052 deadline: 1734020949595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:09,596 DEBUG [Thread-1174 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8161 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46., hostname=4f6a4780a2f6,41933,1734020809476, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T16:28:09,685 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:09,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55110 deadline: 1734020949684, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:09,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-12T16:28:09,870 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:09,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55124 deadline: 1734020949868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:09,873 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:09,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55088 deadline: 1734020949871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:09,887 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/10b0d8cb9ae749f49fa2f95d4124356e as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/10b0d8cb9ae749f49fa2f95d4124356e 2024-12-12T16:28:09,892 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d67e4a55402e756819e4e8a994aa3c46/C of d67e4a55402e756819e4e8a994aa3c46 into 10b0d8cb9ae749f49fa2f95d4124356e(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:28:09,892 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d67e4a55402e756819e4e8a994aa3c46: 2024-12-12T16:28:09,892 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46., storeName=d67e4a55402e756819e4e8a994aa3c46/C, priority=13, startTime=1734020889009; duration=0sec 2024-12-12T16:28:09,892 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:09,892 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d67e4a55402e756819e4e8a994aa3c46:C 2024-12-12T16:28:09,893 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:09,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55110 deadline: 1734020949892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:09,966 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=191 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/aaf44d78aa4f429c9d42bf21def57e38 2024-12-12T16:28:09,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/c77337b0112d48cf947e58271be2a8b0 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/c77337b0112d48cf947e58271be2a8b0 2024-12-12T16:28:09,975 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/c77337b0112d48cf947e58271be2a8b0, entries=150, sequenceid=191, filesize=11.9 K 2024-12-12T16:28:09,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/267033eb2f324537b709b3d1723b820f as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/267033eb2f324537b709b3d1723b820f 2024-12-12T16:28:09,980 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/267033eb2f324537b709b3d1723b820f, entries=150, sequenceid=191, filesize=11.9 K 2024-12-12T16:28:09,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/aaf44d78aa4f429c9d42bf21def57e38 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/aaf44d78aa4f429c9d42bf21def57e38 2024-12-12T16:28:09,985 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/aaf44d78aa4f429c9d42bf21def57e38, entries=150, sequenceid=191, filesize=11.9 K 2024-12-12T16:28:09,986 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for d67e4a55402e756819e4e8a994aa3c46 in 865ms, sequenceid=191, compaction requested=false 2024-12-12T16:28:09,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2538): Flush status journal for d67e4a55402e756819e4e8a994aa3c46: 2024-12-12T16:28:09,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:09,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-12-12T16:28:09,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4106): Remote procedure done, pid=80 2024-12-12T16:28:09,990 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=80, resume processing ppid=79 2024-12-12T16:28:09,990 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, ppid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2450 sec 2024-12-12T16:28:09,991 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees in 2.2500 sec 2024-12-12T16:28:10,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on d67e4a55402e756819e4e8a994aa3c46 2024-12-12T16:28:10,196 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d67e4a55402e756819e4e8a994aa3c46 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-12T16:28:10,196 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=A 2024-12-12T16:28:10,197 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:10,197 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=B 2024-12-12T16:28:10,197 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:10,197 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
d67e4a55402e756819e4e8a994aa3c46, store=C 2024-12-12T16:28:10,197 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:10,201 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/f6bb45c48d3841b3ada8e61f2890f540 is 50, key is test_row_0/A:col10/1734020890195/Put/seqid=0 2024-12-12T16:28:10,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742121_1297 (size=12151) 2024-12-12T16:28:10,256 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:10,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55110 deadline: 1734020950255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:10,359 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:10,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55110 deadline: 1734020950358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:10,562 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:10,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55110 deadline: 1734020950561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:10,606 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=206 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/f6bb45c48d3841b3ada8e61f2890f540 2024-12-12T16:28:10,615 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/fea62d04bc4d4908b084d0af79e4b9f0 is 50, key is test_row_0/B:col10/1734020890195/Put/seqid=0 2024-12-12T16:28:10,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742122_1298 (size=12151) 2024-12-12T16:28:10,620 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=206 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/fea62d04bc4d4908b084d0af79e4b9f0 2024-12-12T16:28:10,627 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/ef8f17ba8d994921be3acb177715697e is 50, key is test_row_0/C:col10/1734020890195/Put/seqid=0 2024-12-12T16:28:10,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742123_1299 (size=12151) 2024-12-12T16:28:10,864 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:10,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55110 deadline: 1734020950863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:11,032 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=206 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/ef8f17ba8d994921be3acb177715697e 2024-12-12T16:28:11,037 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/f6bb45c48d3841b3ada8e61f2890f540 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/f6bb45c48d3841b3ada8e61f2890f540 2024-12-12T16:28:11,041 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/f6bb45c48d3841b3ada8e61f2890f540, entries=150, sequenceid=206, filesize=11.9 K 2024-12-12T16:28:11,042 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/fea62d04bc4d4908b084d0af79e4b9f0 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/fea62d04bc4d4908b084d0af79e4b9f0 2024-12-12T16:28:11,046 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/fea62d04bc4d4908b084d0af79e4b9f0, entries=150, sequenceid=206, filesize=11.9 K 2024-12-12T16:28:11,051 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/ef8f17ba8d994921be3acb177715697e as 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/ef8f17ba8d994921be3acb177715697e 2024-12-12T16:28:11,056 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/ef8f17ba8d994921be3acb177715697e, entries=150, sequenceid=206, filesize=11.9 K 2024-12-12T16:28:11,057 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for d67e4a55402e756819e4e8a994aa3c46 in 861ms, sequenceid=206, compaction requested=true 2024-12-12T16:28:11,057 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d67e4a55402e756819e4e8a994aa3c46: 2024-12-12T16:28:11,057 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:28:11,057 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d67e4a55402e756819e4e8a994aa3c46:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T16:28:11,058 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:11,058 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:28:11,058 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d67e4a55402e756819e4e8a994aa3c46:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T16:28:11,058 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:11,058 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d67e4a55402e756819e4e8a994aa3c46:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T16:28:11,058 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:28:11,059 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:28:11,059 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): d67e4a55402e756819e4e8a994aa3c46/A is initiating minor compaction (all files) 2024-12-12T16:28:11,059 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d67e4a55402e756819e4e8a994aa3c46/A in TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 
2024-12-12T16:28:11,059 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/f85013b9c2364097b7cf41dcb7cd591e, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/c77337b0112d48cf947e58271be2a8b0, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/f6bb45c48d3841b3ada8e61f2890f540] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp, totalSize=36.0 K 2024-12-12T16:28:11,059 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:28:11,059 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): d67e4a55402e756819e4e8a994aa3c46/B is initiating minor compaction (all files) 2024-12-12T16:28:11,060 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d67e4a55402e756819e4e8a994aa3c46/B in TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:11,060 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/0fc3ed8d1ada478cb4ee43df814a8d8c, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/267033eb2f324537b709b3d1723b820f, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/fea62d04bc4d4908b084d0af79e4b9f0] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp, totalSize=36.0 K 2024-12-12T16:28:11,060 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 0fc3ed8d1ada478cb4ee43df814a8d8c, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=165, earliestPutTs=1734020887686 2024-12-12T16:28:11,060 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting f85013b9c2364097b7cf41dcb7cd591e, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=165, earliestPutTs=1734020887686 2024-12-12T16:28:11,060 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 267033eb2f324537b709b3d1723b820f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=191, earliestPutTs=1734020887736 2024-12-12T16:28:11,061 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting c77337b0112d48cf947e58271be2a8b0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=191, earliestPutTs=1734020887736 2024-12-12T16:28:11,061 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] 
compactions.Compactor(224): Compacting fea62d04bc4d4908b084d0af79e4b9f0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1734020889570 2024-12-12T16:28:11,061 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting f6bb45c48d3841b3ada8e61f2890f540, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1734020889570 2024-12-12T16:28:11,070 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d67e4a55402e756819e4e8a994aa3c46#B#compaction#252 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:28:11,071 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/e607f6dddab94eca94fc3f7b41d69423 is 50, key is test_row_0/B:col10/1734020890195/Put/seqid=0 2024-12-12T16:28:11,075 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d67e4a55402e756819e4e8a994aa3c46#A#compaction#253 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:28:11,076 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/8be73b53c4a94bd491548350b9952686 is 50, key is test_row_0/A:col10/1734020890195/Put/seqid=0 2024-12-12T16:28:11,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742124_1300 (size=12663) 2024-12-12T16:28:11,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742125_1301 (size=12663) 2024-12-12T16:28:11,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on d67e4a55402e756819e4e8a994aa3c46 2024-12-12T16:28:11,367 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d67e4a55402e756819e4e8a994aa3c46 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-12T16:28:11,367 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=A 2024-12-12T16:28:11,368 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:11,368 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=B 2024-12-12T16:28:11,368 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:11,368 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=C 2024-12-12T16:28:11,368 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:11,372 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/b39cb6b0ea2f42fabc2aff3a5499a725 is 50, key is test_row_0/A:col10/1734020890254/Put/seqid=0 2024-12-12T16:28:11,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742126_1302 (size=14541) 2024-12-12T16:28:11,388 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:11,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55110 deadline: 1734020951387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:11,491 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:11,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55110 deadline: 1734020951489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:11,495 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/e607f6dddab94eca94fc3f7b41d69423 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/e607f6dddab94eca94fc3f7b41d69423 2024-12-12T16:28:11,500 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d67e4a55402e756819e4e8a994aa3c46/B of d67e4a55402e756819e4e8a994aa3c46 into e607f6dddab94eca94fc3f7b41d69423(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
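The "Over memstore limit=512.0 K" messages in the preceding entries come from the per-region blocking threshold, which is the configured memstore flush size multiplied by the block multiplier. A minimal sketch of that arithmetic follows; the 128 KB flush size is an assumption about this test's configuration, not something stated in the log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Defaults shown are the stock values (128 MB flush size, multiplier 4).
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4);
    // A 128 KB flush size with the default multiplier of 4 would give the 512 KB
    // blocking limit that the RegionTooBusyException messages report for this region.
    System.out.println("Blocking limit (bytes): " + flushSize * multiplier);
  }
}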
2024-12-12T16:28:11,501 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d67e4a55402e756819e4e8a994aa3c46: 2024-12-12T16:28:11,501 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46., storeName=d67e4a55402e756819e4e8a994aa3c46/B, priority=13, startTime=1734020891058; duration=0sec 2024-12-12T16:28:11,501 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:28:11,501 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d67e4a55402e756819e4e8a994aa3c46:B 2024-12-12T16:28:11,501 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:28:11,503 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:28:11,503 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): d67e4a55402e756819e4e8a994aa3c46/C is initiating minor compaction (all files) 2024-12-12T16:28:11,503 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d67e4a55402e756819e4e8a994aa3c46/C in TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:11,503 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/10b0d8cb9ae749f49fa2f95d4124356e, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/aaf44d78aa4f429c9d42bf21def57e38, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/ef8f17ba8d994921be3acb177715697e] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp, totalSize=36.0 K 2024-12-12T16:28:11,503 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/8be73b53c4a94bd491548350b9952686 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/8be73b53c4a94bd491548350b9952686 2024-12-12T16:28:11,503 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 10b0d8cb9ae749f49fa2f95d4124356e, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=165, earliestPutTs=1734020887686 2024-12-12T16:28:11,504 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting aaf44d78aa4f429c9d42bf21def57e38, keycount=150, 
bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=191, earliestPutTs=1734020887736 2024-12-12T16:28:11,504 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting ef8f17ba8d994921be3acb177715697e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1734020889570 2024-12-12T16:28:11,510 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d67e4a55402e756819e4e8a994aa3c46/A of d67e4a55402e756819e4e8a994aa3c46 into 8be73b53c4a94bd491548350b9952686(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:28:11,510 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d67e4a55402e756819e4e8a994aa3c46: 2024-12-12T16:28:11,510 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46., storeName=d67e4a55402e756819e4e8a994aa3c46/A, priority=13, startTime=1734020891057; duration=0sec 2024-12-12T16:28:11,510 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:11,510 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d67e4a55402e756819e4e8a994aa3c46:A 2024-12-12T16:28:11,513 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d67e4a55402e756819e4e8a994aa3c46#C#compaction#255 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:28:11,513 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/b521c03e22fc4171a62e3263bfb7b757 is 50, key is test_row_0/C:col10/1734020890195/Put/seqid=0 2024-12-12T16:28:11,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742127_1303 (size=12663) 2024-12-12T16:28:11,694 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:11,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55110 deadline: 1734020951692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:11,778 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=231 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/b39cb6b0ea2f42fabc2aff3a5499a725 2024-12-12T16:28:11,785 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/db1117c3b266422ebee0430788726f68 is 50, key is test_row_0/B:col10/1734020890254/Put/seqid=0 2024-12-12T16:28:11,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742128_1304 (size=12151) 2024-12-12T16:28:11,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-12T16:28:11,848 INFO [Thread-1178 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 79 completed 2024-12-12T16:28:11,849 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T16:28:11,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees 2024-12-12T16:28:11,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-12T16:28:11,851 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T16:28:11,851 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T16:28:11,852 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
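The FLUSH procedure stored above (pid=81, table=TestAcidGuarantees) is the server-side counterpart of an Admin flush call issued by the test client. A minimal sketch of such a call, assuming a reachable cluster with the usual client configuration on the classpath; only the table name is taken from this log:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Connectionfactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Waits for the master-side flush procedure; the later
      // "Operation: FLUSH, Table Name: default:TestAcidGuarantees ... completed"
      // line in this log corresponds to that wait finishing.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}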
2024-12-12T16:28:11,875 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:11,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55124 deadline: 1734020951874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:11,875 DEBUG [Thread-1176 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4129 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46., hostname=4f6a4780a2f6,41933,1734020809476, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at 
org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at 
org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T16:28:11,884 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:11,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55088 deadline: 1734020951882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:11,884 DEBUG [Thread-1170 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4136 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46., hostname=4f6a4780a2f6,41933,1734020809476, seqNum=2, see 
https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) 
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T16:28:11,926 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/b521c03e22fc4171a62e3263bfb7b757 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/b521c03e22fc4171a62e3263bfb7b757 2024-12-12T16:28:11,931 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d67e4a55402e756819e4e8a994aa3c46/C of d67e4a55402e756819e4e8a994aa3c46 into b521c03e22fc4171a62e3263bfb7b757(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:28:11,931 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d67e4a55402e756819e4e8a994aa3c46: 2024-12-12T16:28:11,932 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46., storeName=d67e4a55402e756819e4e8a994aa3c46/C, priority=13, startTime=1734020891058; duration=0sec 2024-12-12T16:28:11,932 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:11,932 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d67e4a55402e756819e4e8a994aa3c46:C 2024-12-12T16:28:11,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-12T16:28:11,996 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:11,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55110 deadline: 1734020951995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:12,003 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:12,004 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-12T16:28:12,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:12,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. as already flushing 2024-12-12T16:28:12,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:12,004 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
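The repeated RegionTooBusyException / RpcRetryingCallerImpl entries above are what a writer thread observes while the region blocks new puts. The following is a minimal, hypothetical client-side sketch of the same kind of write, assuming a reachable cluster; the table, row, and family names come from the log, the value is made up, and the client's internal retrying (the "tries=6, retries=16" lines) is only described loosely in the comments:

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutExample {
  public static void main(String[] args) throws IOException {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_1"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      try {
        // The client retries internally while the region's memstore is over its
        // blocking limit, as the RpcRetryingCallerImpl entries above show.
        table.put(put);
      } catch (IOException e) {
        // If the configured retries are exhausted, the busy condition surfaces here,
        // possibly wrapped in a retries-exhausted exception; back off and retry later.
        System.err.println("Write failed, region may still be blocking: " + e);
      }
    }
  }
}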
2024-12-12T16:28:12,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:12,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:12,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-12T16:28:12,157 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:12,157 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-12T16:28:12,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:12,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. as already flushing 2024-12-12T16:28:12,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:12,158 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:12,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:12,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:28:12,191 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=231 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/db1117c3b266422ebee0430788726f68 2024-12-12T16:28:12,201 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/82786d3ec8704ba0b92eff66192cf911 is 50, key is test_row_0/C:col10/1734020890254/Put/seqid=0 2024-12-12T16:28:12,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742129_1305 (size=12151) 2024-12-12T16:28:12,227 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=231 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/82786d3ec8704ba0b92eff66192cf911 2024-12-12T16:28:12,233 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/b39cb6b0ea2f42fabc2aff3a5499a725 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/b39cb6b0ea2f42fabc2aff3a5499a725 2024-12-12T16:28:12,242 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/b39cb6b0ea2f42fabc2aff3a5499a725, entries=200, sequenceid=231, filesize=14.2 K 2024-12-12T16:28:12,244 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/db1117c3b266422ebee0430788726f68 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/db1117c3b266422ebee0430788726f68 2024-12-12T16:28:12,249 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/db1117c3b266422ebee0430788726f68, entries=150, sequenceid=231, filesize=11.9 K 2024-12-12T16:28:12,251 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/82786d3ec8704ba0b92eff66192cf911 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/82786d3ec8704ba0b92eff66192cf911 2024-12-12T16:28:12,256 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/82786d3ec8704ba0b92eff66192cf911, entries=150, sequenceid=231, filesize=11.9 K 2024-12-12T16:28:12,257 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for d67e4a55402e756819e4e8a994aa3c46 in 890ms, sequenceid=231, compaction requested=false 2024-12-12T16:28:12,257 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d67e4a55402e756819e4e8a994aa3c46: 2024-12-12T16:28:12,310 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:12,310 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-12T16:28:12,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:12,310 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2837): Flushing d67e4a55402e756819e4e8a994aa3c46 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T16:28:12,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=A 2024-12-12T16:28:12,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:12,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=B 2024-12-12T16:28:12,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:12,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=C 2024-12-12T16:28:12,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:12,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/634affaea444429d94aa0cd489dde909 is 50, key is test_row_0/A:col10/1734020891369/Put/seqid=0 2024-12-12T16:28:12,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742130_1306 (size=12151) 2024-12-12T16:28:12,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure 
is done pid=81 2024-12-12T16:28:12,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on d67e4a55402e756819e4e8a994aa3c46 2024-12-12T16:28:12,502 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. as already flushing 2024-12-12T16:28:12,570 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:12,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55110 deadline: 1734020952569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:12,673 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:12,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55110 deadline: 1734020952672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:12,728 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=245 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/634affaea444429d94aa0cd489dde909 2024-12-12T16:28:12,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/35da8f9f921d47deb9e88339f2a61df6 is 50, key is test_row_0/B:col10/1734020891369/Put/seqid=0 2024-12-12T16:28:12,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742131_1307 (size=12151) 2024-12-12T16:28:12,877 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:12,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55110 deadline: 1734020952876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:12,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-12T16:28:13,141 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=245 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/35da8f9f921d47deb9e88339f2a61df6 2024-12-12T16:28:13,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/6817c5899c7b47039d86ad9726729e66 is 50, key is test_row_0/C:col10/1734020891369/Put/seqid=0 2024-12-12T16:28:13,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742132_1308 (size=12151) 2024-12-12T16:28:13,180 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:13,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55110 deadline: 1734020953179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:13,555 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=245 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/6817c5899c7b47039d86ad9726729e66 2024-12-12T16:28:13,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/634affaea444429d94aa0cd489dde909 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/634affaea444429d94aa0cd489dde909 2024-12-12T16:28:13,564 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/634affaea444429d94aa0cd489dde909, entries=150, sequenceid=245, filesize=11.9 K 2024-12-12T16:28:13,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/35da8f9f921d47deb9e88339f2a61df6 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/35da8f9f921d47deb9e88339f2a61df6 2024-12-12T16:28:13,570 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/35da8f9f921d47deb9e88339f2a61df6, entries=150, sequenceid=245, filesize=11.9 K 2024-12-12T16:28:13,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/6817c5899c7b47039d86ad9726729e66 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/6817c5899c7b47039d86ad9726729e66 2024-12-12T16:28:13,575 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/6817c5899c7b47039d86ad9726729e66, entries=150, sequenceid=245, filesize=11.9 K 2024-12-12T16:28:13,577 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for d67e4a55402e756819e4e8a994aa3c46 in 1267ms, sequenceid=245, compaction requested=true 2024-12-12T16:28:13,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2538): Flush status journal for d67e4a55402e756819e4e8a994aa3c46: 2024-12-12T16:28:13,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:13,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=82 2024-12-12T16:28:13,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4106): Remote procedure done, pid=82 2024-12-12T16:28:13,580 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=82, resume processing ppid=81 2024-12-12T16:28:13,580 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7270 sec 2024-12-12T16:28:13,582 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees in 1.7320 sec 2024-12-12T16:28:13,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on d67e4a55402e756819e4e8a994aa3c46 2024-12-12T16:28:13,688 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d67e4a55402e756819e4e8a994aa3c46 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-12T16:28:13,688 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=A 2024-12-12T16:28:13,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:13,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=B 2024-12-12T16:28:13,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:13,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
d67e4a55402e756819e4e8a994aa3c46, store=C 2024-12-12T16:28:13,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:13,694 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/6285c43c28824a01b04c66e9ffba8876 is 50, key is test_row_0/A:col10/1734020892561/Put/seqid=0 2024-12-12T16:28:13,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742133_1309 (size=14741) 2024-12-12T16:28:13,709 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:13,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55110 deadline: 1734020953708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:13,811 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:13,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55110 deadline: 1734020953811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:13,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-12T16:28:13,955 INFO [Thread-1178 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 81 completed 2024-12-12T16:28:13,956 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T16:28:13,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees 2024-12-12T16:28:13,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-12T16:28:13,958 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T16:28:13,959 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T16:28:13,959 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T16:28:14,014 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:14,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55110 deadline: 1734020954012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:14,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-12T16:28:14,099 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=271 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/6285c43c28824a01b04c66e9ffba8876 2024-12-12T16:28:14,106 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/91f67ac4367241e7b378c0ef9ec5b1b5 is 50, key is test_row_0/B:col10/1734020892561/Put/seqid=0 2024-12-12T16:28:14,111 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:14,114 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-12T16:28:14,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:14,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. as already flushing 2024-12-12T16:28:14,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:14,114 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:14,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:14,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:14,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742134_1310 (size=12301) 2024-12-12T16:28:14,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-12T16:28:14,270 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:14,271 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-12T16:28:14,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:14,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. as already flushing 2024-12-12T16:28:14,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 
2024-12-12T16:28:14,271 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:14,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:14,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:14,316 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:14,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55110 deadline: 1734020954315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:14,423 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:14,424 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-12T16:28:14,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:14,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. as already flushing 2024-12-12T16:28:14,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:14,424 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:28:14,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:14,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:14,517 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=271 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/91f67ac4367241e7b378c0ef9ec5b1b5 2024-12-12T16:28:14,524 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/5005bb446a4a460ea3af5fbf2d93fec1 is 50, key is test_row_0/C:col10/1734020892561/Put/seqid=0 2024-12-12T16:28:14,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742135_1311 (size=12301) 2024-12-12T16:28:14,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-12T16:28:14,576 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:14,577 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-12T16:28:14,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:14,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. as already flushing 2024-12-12T16:28:14,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:14,577 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:14,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:14,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:14,730 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:14,730 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-12T16:28:14,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:14,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. as already flushing 2024-12-12T16:28:14,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:14,730 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:14,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:14,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:14,823 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:14,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55110 deadline: 1734020954822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:14,883 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:14,883 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-12T16:28:14,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:14,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. as already flushing 2024-12-12T16:28:14,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:14,884 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:14,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:14,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:14,929 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=271 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/5005bb446a4a460ea3af5fbf2d93fec1 2024-12-12T16:28:14,934 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/6285c43c28824a01b04c66e9ffba8876 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/6285c43c28824a01b04c66e9ffba8876 2024-12-12T16:28:14,937 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/6285c43c28824a01b04c66e9ffba8876, entries=200, sequenceid=271, filesize=14.4 K 2024-12-12T16:28:14,938 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/91f67ac4367241e7b378c0ef9ec5b1b5 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/91f67ac4367241e7b378c0ef9ec5b1b5 2024-12-12T16:28:14,943 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/91f67ac4367241e7b378c0ef9ec5b1b5, entries=150, sequenceid=271, filesize=12.0 K 2024-12-12T16:28:14,944 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/5005bb446a4a460ea3af5fbf2d93fec1 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/5005bb446a4a460ea3af5fbf2d93fec1 2024-12-12T16:28:14,948 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/5005bb446a4a460ea3af5fbf2d93fec1, entries=150, sequenceid=271, filesize=12.0 K 2024-12-12T16:28:14,949 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for d67e4a55402e756819e4e8a994aa3c46 in 1261ms, sequenceid=271, compaction requested=true 2024-12-12T16:28:14,949 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d67e4a55402e756819e4e8a994aa3c46: 2024-12-12T16:28:14,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d67e4a55402e756819e4e8a994aa3c46:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T16:28:14,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:14,950 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T16:28:14,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d67e4a55402e756819e4e8a994aa3c46:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T16:28:14,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:14,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d67e4a55402e756819e4e8a994aa3c46:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T16:28:14,950 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T16:28:14,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:28:14,951 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49266 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T16:28:14,951 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): d67e4a55402e756819e4e8a994aa3c46/B is initiating minor compaction (all files) 2024-12-12T16:28:14,952 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d67e4a55402e756819e4e8a994aa3c46/B in TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 
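The RegionTooBusyException entries above show writes being rejected once the region's memstore crosses its 512.0 K blocking limit, while the flush that has just finished brings it back under that limit. A minimal Java sketch of how such a small blocking limit could be configured for a test run like this; the 128 KB flush size and multiplier of 4 are assumptions chosen so that flush.size * multiplier equals the 512 K limit in the log, not values read from this configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreLimitSketch {
    // Hypothetical helper: builds a Configuration whose memstore blocking limit
    // works out to 512 KB, matching the "Over memstore limit=512.0 K" warnings.
    public static Configuration create() {
        Configuration conf = HBaseConfiguration.create();
        // Flush a region's memstore once it reaches ~128 KB (assumed test value).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        // Reject writes with RegionTooBusyException once the memstore reaches
        // flush.size * multiplier = 4 * 128 KB = 512 KB.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        return conf;
    }
}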
2024-12-12T16:28:14,952 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/e607f6dddab94eca94fc3f7b41d69423, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/db1117c3b266422ebee0430788726f68, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/35da8f9f921d47deb9e88339f2a61df6, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/91f67ac4367241e7b378c0ef9ec5b1b5] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp, totalSize=48.1 K 2024-12-12T16:28:14,952 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 54096 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T16:28:14,952 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): d67e4a55402e756819e4e8a994aa3c46/A is initiating minor compaction (all files) 2024-12-12T16:28:14,952 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d67e4a55402e756819e4e8a994aa3c46/A in TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:14,952 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/8be73b53c4a94bd491548350b9952686, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/b39cb6b0ea2f42fabc2aff3a5499a725, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/634affaea444429d94aa0cd489dde909, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/6285c43c28824a01b04c66e9ffba8876] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp, totalSize=52.8 K 2024-12-12T16:28:14,952 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting e607f6dddab94eca94fc3f7b41d69423, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1734020889570 2024-12-12T16:28:14,953 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8be73b53c4a94bd491548350b9952686, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1734020889570 2024-12-12T16:28:14,953 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting db1117c3b266422ebee0430788726f68, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=231, 
earliestPutTs=1734020890234 2024-12-12T16:28:14,953 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting b39cb6b0ea2f42fabc2aff3a5499a725, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=231, earliestPutTs=1734020890234 2024-12-12T16:28:14,953 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 35da8f9f921d47deb9e88339f2a61df6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1734020891369 2024-12-12T16:28:14,953 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 634affaea444429d94aa0cd489dde909, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1734020891369 2024-12-12T16:28:14,954 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 91f67ac4367241e7b378c0ef9ec5b1b5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=271, earliestPutTs=1734020892561 2024-12-12T16:28:14,954 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6285c43c28824a01b04c66e9ffba8876, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=271, earliestPutTs=1734020892561 2024-12-12T16:28:14,964 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d67e4a55402e756819e4e8a994aa3c46#A#compaction#264 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:28:14,964 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/983be55cc8044e54bb7437bfa73652f6 is 50, key is test_row_0/A:col10/1734020892561/Put/seqid=0 2024-12-12T16:28:14,966 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d67e4a55402e756819e4e8a994aa3c46#B#compaction#265 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:28:14,967 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/61c4f771fe15475aa1b0461662820506 is 50, key is test_row_0/B:col10/1734020892561/Put/seqid=0 2024-12-12T16:28:14,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742136_1312 (size=12949) 2024-12-12T16:28:14,988 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/61c4f771fe15475aa1b0461662820506 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/61c4f771fe15475aa1b0461662820506 2024-12-12T16:28:14,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742137_1313 (size=12949) 2024-12-12T16:28:14,992 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d67e4a55402e756819e4e8a994aa3c46/B of d67e4a55402e756819e4e8a994aa3c46 into 61c4f771fe15475aa1b0461662820506(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:28:14,992 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d67e4a55402e756819e4e8a994aa3c46: 2024-12-12T16:28:14,993 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46., storeName=d67e4a55402e756819e4e8a994aa3c46/B, priority=12, startTime=1734020894950; duration=0sec 2024-12-12T16:28:14,993 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:28:14,993 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d67e4a55402e756819e4e8a994aa3c46:B 2024-12-12T16:28:14,993 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T16:28:14,994 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49266 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T16:28:14,994 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): d67e4a55402e756819e4e8a994aa3c46/C is initiating minor compaction (all files) 2024-12-12T16:28:14,994 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d67e4a55402e756819e4e8a994aa3c46/C in TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 
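Above, each flushed family immediately becomes eligible for a minor compaction: the ExploringCompactionPolicy selects all four store files per store and rewrites them into a single file. If the same behaviour were driven from a client rather than by the flusher, a hedged sketch using the Admin API would look like the following; the table name is taken from the log, the polling loop is illustrative.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactionRequestSketch {
    public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            admin.compact(table);  // queue a (minor) compaction for every region of the table
            // Poll until the compaction queues for the table have drained.
            while (admin.getCompactionState(table) != CompactionState.NONE) {
                Thread.sleep(200);
            }
        }
    }
}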
2024-12-12T16:28:14,995 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/b521c03e22fc4171a62e3263bfb7b757, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/82786d3ec8704ba0b92eff66192cf911, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/6817c5899c7b47039d86ad9726729e66, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/5005bb446a4a460ea3af5fbf2d93fec1] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp, totalSize=48.1 K 2024-12-12T16:28:14,995 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting b521c03e22fc4171a62e3263bfb7b757, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1734020889570 2024-12-12T16:28:14,995 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 82786d3ec8704ba0b92eff66192cf911, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=231, earliestPutTs=1734020890234 2024-12-12T16:28:14,996 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 6817c5899c7b47039d86ad9726729e66, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1734020891369 2024-12-12T16:28:14,996 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 5005bb446a4a460ea3af5fbf2d93fec1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=271, earliestPutTs=1734020892561 2024-12-12T16:28:15,005 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d67e4a55402e756819e4e8a994aa3c46#C#compaction#266 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:28:15,006 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/927324f133744e73985e7d5e8f196cd3 is 50, key is test_row_0/C:col10/1734020892561/Put/seqid=0 2024-12-12T16:28:15,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742138_1314 (size=12949) 2024-12-12T16:28:15,015 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/927324f133744e73985e7d5e8f196cd3 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/927324f133744e73985e7d5e8f196cd3 2024-12-12T16:28:15,020 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d67e4a55402e756819e4e8a994aa3c46/C of d67e4a55402e756819e4e8a994aa3c46 into 927324f133744e73985e7d5e8f196cd3(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:28:15,020 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d67e4a55402e756819e4e8a994aa3c46: 2024-12-12T16:28:15,020 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46., storeName=d67e4a55402e756819e4e8a994aa3c46/C, priority=12, startTime=1734020894950; duration=0sec 2024-12-12T16:28:15,021 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:15,021 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d67e4a55402e756819e4e8a994aa3c46:C 2024-12-12T16:28:15,036 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:15,037 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-12T16:28:15,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 
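The FlushRegionCallable executions for pid=84 above are the region-server half of a master-driven flush procedure; the earlier attempts were rejected with "NOT flushing ... as already flushing" and re-dispatched until the flush already in progress finished. Assuming the flush was requested through the client Admin API, a minimal sketch of issuing that request:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // Flushes every region of the table; the master turns this into a procedure
            // (the pid seen in the log) and dispatches FlushRegionCallable to the
            // region server hosting d67e4a55402e756819e4e8a994aa3c46.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}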
2024-12-12T16:28:15,037 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2837): Flushing d67e4a55402e756819e4e8a994aa3c46 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-12T16:28:15,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=A 2024-12-12T16:28:15,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:15,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=B 2024-12-12T16:28:15,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:15,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=C 2024-12-12T16:28:15,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:15,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/19dfff15fffd4e2c88945f7346805f98 is 50, key is test_row_0/A:col10/1734020893707/Put/seqid=0 2024-12-12T16:28:15,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742139_1315 (size=12301) 2024-12-12T16:28:15,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-12T16:28:15,397 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/983be55cc8044e54bb7437bfa73652f6 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/983be55cc8044e54bb7437bfa73652f6 2024-12-12T16:28:15,402 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d67e4a55402e756819e4e8a994aa3c46/A of d67e4a55402e756819e4e8a994aa3c46 into 983be55cc8044e54bb7437bfa73652f6(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
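The client-side retry entries further down (Thread-1170 and Thread-1176, RpcRetryingCallerImpl with tries=7, retries=16) bottom out in AcidGuaranteesTestTool$AtomicityWriter calling HTable.put, so the RegionTooBusyException is absorbed by the client's built-in retrying caller rather than by test code. A minimal sketch of such a single-row put; the value bytes are illustrative and the retry setting is an assumption for illustration, not one read from this run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class AtomicityWriterPutSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Assumed client tuning; the log shows the caller working with retries=16.
        conf.setInt("hbase.client.retries.number", 16);
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            // One row with writes to the A, B and C families seen in the log; a
            // RegionTooBusyException from the server is retried internally by
            // RpcRetryingCallerImpl with backoff, as in the surrounding entries.
            Put put = new Put(Bytes.toBytes("test_row_1"));
            byte[] value = Bytes.toBytes("illustrative-value");
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), value);
            put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), value);
            put.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col10"), value);
            table.put(put);
        }
    }
}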
2024-12-12T16:28:15,403 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d67e4a55402e756819e4e8a994aa3c46: 2024-12-12T16:28:15,403 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46., storeName=d67e4a55402e756819e4e8a994aa3c46/A, priority=12, startTime=1734020894949; duration=0sec 2024-12-12T16:28:15,403 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:15,403 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d67e4a55402e756819e4e8a994aa3c46:A 2024-12-12T16:28:15,459 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=283 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/19dfff15fffd4e2c88945f7346805f98 2024-12-12T16:28:15,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/2c7c90f36dfb4116acdc98f6416046ee is 50, key is test_row_0/B:col10/1734020893707/Put/seqid=0 2024-12-12T16:28:15,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742140_1316 (size=12301) 2024-12-12T16:28:15,474 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=283 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/2c7c90f36dfb4116acdc98f6416046ee 2024-12-12T16:28:15,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/e49356ac40364677969dbc411eed004b is 50, key is test_row_0/C:col10/1734020893707/Put/seqid=0 2024-12-12T16:28:15,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742141_1317 (size=12301) 2024-12-12T16:28:15,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on d67e4a55402e756819e4e8a994aa3c46 2024-12-12T16:28:15,829 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. as already flushing 2024-12-12T16:28:15,877 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:15,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55110 deadline: 1734020955876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:15,888 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=283 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/e49356ac40364677969dbc411eed004b 2024-12-12T16:28:15,892 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:15,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55088 deadline: 1734020955890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:15,893 DEBUG [Thread-1170 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8144 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46., hostname=4f6a4780a2f6,41933,1734020809476, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T16:28:15,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/19dfff15fffd4e2c88945f7346805f98 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/19dfff15fffd4e2c88945f7346805f98 2024-12-12T16:28:15,897 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, 
pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/19dfff15fffd4e2c88945f7346805f98, entries=150, sequenceid=283, filesize=12.0 K 2024-12-12T16:28:15,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/2c7c90f36dfb4116acdc98f6416046ee as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/2c7c90f36dfb4116acdc98f6416046ee 2024-12-12T16:28:15,900 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:15,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55124 deadline: 1734020955900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:15,901 DEBUG [Thread-1176 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8155 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46., hostname=4f6a4780a2f6,41933,1734020809476, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at 
org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T16:28:15,902 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/2c7c90f36dfb4116acdc98f6416046ee, entries=150, sequenceid=283, filesize=12.0 K 2024-12-12T16:28:15,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/e49356ac40364677969dbc411eed004b as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/e49356ac40364677969dbc411eed004b 2024-12-12T16:28:15,907 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/e49356ac40364677969dbc411eed004b, entries=150, sequenceid=283, filesize=12.0 K 2024-12-12T16:28:15,908 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for d67e4a55402e756819e4e8a994aa3c46 in 871ms, sequenceid=283, compaction requested=false 2024-12-12T16:28:15,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2538): Flush status journal for d67e4a55402e756819e4e8a994aa3c46: 2024-12-12T16:28:15,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 
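The RegionTooBusyException entries above ("Over memstore limit=512.0 K") are thrown by HRegion.checkResources while the in-flight flush is still draining the memstore, and the client's RpcRetryingCallerImpl keeps retrying (tries=7, retries=16 in the log). How long a caller waits before that exception surfaces is governed by standard client configuration keys; a minimal, hypothetical sketch (the class name and the concrete values are illustrative, not taken from the test):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryTuningSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Client-side knobs read by the retrying caller: attempt count, base backoff
    // between attempts, and the overall per-operation budget.
    conf.setInt("hbase.client.retries.number", 16);         // matches retries=16 seen in the log
    conf.setInt("hbase.client.pause", 100);                 // base backoff in ms
    conf.setInt("hbase.client.operation.timeout", 120000);  // total budget per operation, in ms

    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_1"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // Retried internally while the region reports RegionTooBusyException; the
      // exception only reaches the caller once the retry budget is exhausted.
      table.put(put);
    }
  }
}

The server-side threshold itself (the "memstore limit=512.0 K") is the region's blocking memstore size, i.e. hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; a value this small presumably comes from the test's reduced flush size rather than production defaults.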
2024-12-12T16:28:15,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=84 2024-12-12T16:28:15,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4106): Remote procedure done, pid=84 2024-12-12T16:28:15,911 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=83 2024-12-12T16:28:15,911 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9500 sec 2024-12-12T16:28:15,913 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees in 1.9560 sec 2024-12-12T16:28:15,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on d67e4a55402e756819e4e8a994aa3c46 2024-12-12T16:28:15,981 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d67e4a55402e756819e4e8a994aa3c46 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-12T16:28:15,981 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=A 2024-12-12T16:28:15,981 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:15,981 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=B 2024-12-12T16:28:15,981 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:15,981 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=C 2024-12-12T16:28:15,981 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:15,986 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/da9afb064eb24862b7dfbff2a6ec5d2d is 50, key is test_row_0/A:col10/1734020895979/Put/seqid=0 2024-12-12T16:28:15,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742142_1318 (size=12301) 2024-12-12T16:28:15,997 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:15,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55110 deadline: 1734020955996, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:16,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-12T16:28:16,062 INFO [Thread-1178 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 83 completed 2024-12-12T16:28:16,063 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T16:28:16,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees 2024-12-12T16:28:16,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-12T16:28:16,065 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T16:28:16,066 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T16:28:16,066 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=86, ppid=85, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T16:28:16,099 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:16,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55110 deadline: 1734020956098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:16,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-12T16:28:16,217 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:16,218 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-12T16:28:16,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:16,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. as already flushing 2024-12-12T16:28:16,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:16,218 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:16,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:16,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:16,302 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:16,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55110 deadline: 1734020956300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:16,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-12T16:28:16,371 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:16,371 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-12T16:28:16,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 
2024-12-12T16:28:16,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. as already flushing 2024-12-12T16:28:16,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:16,371 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:16,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:28:16,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
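The repeated pid=86 failures above are the retry loop of a master-driven flush: FlushTableProcedure (pid=85) spawned FlushRegionProcedure (pid=86), but the region server declines with "NOT flushing ... as already flushing" while the MemStoreFlusher-triggered flush is still running, so the master redispatches the callable until it can complete. The whole sequence is what a client-requested table flush looks like from the server side; a minimal sketch assuming the standard Admin API (the class name is illustrative):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestSketch {
  public static void main(String[] args) throws Exception {
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = connection.getAdmin()) {
      // Submits a FlushTableProcedure on the master (one FlushRegionProcedure per region,
      // like pid=85/pid=86 above); in this 2.7.0-SNAPSHOT build the client then waits on
      // the procedure, as the HBaseAdmin$TableFuture "procId: 83 completed" line shows.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}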
2024-12-12T16:28:16,390 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=311 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/da9afb064eb24862b7dfbff2a6ec5d2d 2024-12-12T16:28:16,398 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/ee70cc5485cd4c2ea9d90c01dc96e427 is 50, key is test_row_0/B:col10/1734020895979/Put/seqid=0 2024-12-12T16:28:16,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742143_1319 (size=12301) 2024-12-12T16:28:16,403 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=311 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/ee70cc5485cd4c2ea9d90c01dc96e427 2024-12-12T16:28:16,410 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/f3fe504acb5c45d69774ada363464527 is 50, key is test_row_0/C:col10/1734020895979/Put/seqid=0 2024-12-12T16:28:16,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742144_1320 (size=12301) 2024-12-12T16:28:16,524 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:16,524 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-12T16:28:16,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:16,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. as already flushing 2024-12-12T16:28:16,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:16,524 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:16,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:16,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:16,605 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:16,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55110 deadline: 1734020956604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:16,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-12T16:28:16,677 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:16,677 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-12T16:28:16,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:16,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. as already flushing 2024-12-12T16:28:16,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:16,678 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:28:16,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:16,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:16,826 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=311 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/f3fe504acb5c45d69774ada363464527 2024-12-12T16:28:16,830 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:16,830 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-12T16:28:16,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:16,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. as already flushing 2024-12-12T16:28:16,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:16,831 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:16,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:16,832 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/da9afb064eb24862b7dfbff2a6ec5d2d as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/da9afb064eb24862b7dfbff2a6ec5d2d 2024-12-12T16:28:16,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:16,836 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/da9afb064eb24862b7dfbff2a6ec5d2d, entries=150, sequenceid=311, filesize=12.0 K 2024-12-12T16:28:16,837 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/ee70cc5485cd4c2ea9d90c01dc96e427 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/ee70cc5485cd4c2ea9d90c01dc96e427 2024-12-12T16:28:16,841 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/ee70cc5485cd4c2ea9d90c01dc96e427, entries=150, sequenceid=311, filesize=12.0 K 2024-12-12T16:28:16,842 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/f3fe504acb5c45d69774ada363464527 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/f3fe504acb5c45d69774ada363464527 2024-12-12T16:28:16,846 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/f3fe504acb5c45d69774ada363464527, entries=150, sequenceid=311, filesize=12.0 K 2024-12-12T16:28:16,847 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for d67e4a55402e756819e4e8a994aa3c46 in 867ms, sequenceid=311, compaction requested=true 2024-12-12T16:28:16,847 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d67e4a55402e756819e4e8a994aa3c46: 2024-12-12T16:28:16,847 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:28:16,848 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d67e4a55402e756819e4e8a994aa3c46:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T16:28:16,848 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:28:16,849 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): d67e4a55402e756819e4e8a994aa3c46/A is initiating minor compaction (all files) 2024-12-12T16:28:16,849 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting 
compaction of d67e4a55402e756819e4e8a994aa3c46/A in TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:16,849 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/983be55cc8044e54bb7437bfa73652f6, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/19dfff15fffd4e2c88945f7346805f98, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/da9afb064eb24862b7dfbff2a6ec5d2d] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp, totalSize=36.7 K 2024-12-12T16:28:16,849 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 983be55cc8044e54bb7437bfa73652f6, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=271, earliestPutTs=1734020892561 2024-12-12T16:28:16,849 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:16,849 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:28:16,850 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 19dfff15fffd4e2c88945f7346805f98, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=283, earliestPutTs=1734020893696 2024-12-12T16:28:16,850 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d67e4a55402e756819e4e8a994aa3c46:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T16:28:16,850 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting da9afb064eb24862b7dfbff2a6ec5d2d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1734020895871 2024-12-12T16:28:16,851 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:28:16,851 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): d67e4a55402e756819e4e8a994aa3c46/B is initiating minor compaction (all files) 2024-12-12T16:28:16,851 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d67e4a55402e756819e4e8a994aa3c46/B in TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 
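The entries above record ExploringCompactionPolicy picking all three eligible HFiles of store A (and, on the long-compactions thread, store B) for a minor compaction once the preceding flush brought each store to three files, consistent with the default hbase.hstore.compactionThreshold of 3. For reference, a minimal Java sketch of requesting the same kind of compaction from a client through the public Admin API; only the table name is taken from the log, the connection setup and polling interval are illustrative assumptions:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public final class RequestCompaction {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();          // reads hbase-site.xml from the classpath
            TableName table = TableName.valueOf("TestAcidGuarantees"); // table name as it appears in the log
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                admin.compact(table);                                  // queue a minor compaction for every region/store
                // Poll until the region servers report no compaction in progress.
                while (admin.getCompactionState(table) != CompactionState.NONE) {
                    Thread.sleep(500);
                }
            }
        }
    }

Admin.compact only queues the request; the actual file selection and the PressureAwareThroughputController limit seen a few entries later are applied on the region server.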
2024-12-12T16:28:16,851 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/61c4f771fe15475aa1b0461662820506, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/2c7c90f36dfb4116acdc98f6416046ee, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/ee70cc5485cd4c2ea9d90c01dc96e427] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp, totalSize=36.7 K 2024-12-12T16:28:16,851 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 61c4f771fe15475aa1b0461662820506, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=271, earliestPutTs=1734020892561 2024-12-12T16:28:16,851 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:16,852 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 2c7c90f36dfb4116acdc98f6416046ee, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=283, earliestPutTs=1734020893696 2024-12-12T16:28:16,852 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d67e4a55402e756819e4e8a994aa3c46:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T16:28:16,852 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:28:16,852 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting ee70cc5485cd4c2ea9d90c01dc96e427, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1734020895871 2024-12-12T16:28:16,861 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d67e4a55402e756819e4e8a994aa3c46#A#compaction#273 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:28:16,861 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/c8ecc55c9b1c4ee08623b775c9faaf2e is 50, key is test_row_0/A:col10/1734020895979/Put/seqid=0 2024-12-12T16:28:16,866 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d67e4a55402e756819e4e8a994aa3c46#B#compaction#274 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:28:16,866 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/f3f6e19180cc402a97f57fd1fcadc313 is 50, key is test_row_0/B:col10/1734020895979/Put/seqid=0 2024-12-12T16:28:16,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742145_1321 (size=13051) 2024-12-12T16:28:16,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742146_1322 (size=13051) 2024-12-12T16:28:16,984 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:16,985 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-12T16:28:16,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:16,985 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2837): Flushing d67e4a55402e756819e4e8a994aa3c46 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-12-12T16:28:16,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=A 2024-12-12T16:28:16,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:16,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=B 2024-12-12T16:28:16,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:16,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=C 2024-12-12T16:28:16,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:16,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/9a9cc7131208447c83b00dc491d9ea74 is 50, key is test_row_0/A:col10/1734020895995/Put/seqid=0 2024-12-12T16:28:16,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742147_1323 
(size=12301) 2024-12-12T16:28:17,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on d67e4a55402e756819e4e8a994aa3c46 2024-12-12T16:28:17,114 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. as already flushing 2024-12-12T16:28:17,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-12T16:28:17,173 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:17,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55110 deadline: 1734020957171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:17,276 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:17,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55110 deadline: 1734020957274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:17,293 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/c8ecc55c9b1c4ee08623b775c9faaf2e as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/c8ecc55c9b1c4ee08623b775c9faaf2e 2024-12-12T16:28:17,298 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d67e4a55402e756819e4e8a994aa3c46/A of d67e4a55402e756819e4e8a994aa3c46 into c8ecc55c9b1c4ee08623b775c9faaf2e(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T16:28:17,298 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d67e4a55402e756819e4e8a994aa3c46: 2024-12-12T16:28:17,298 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46., storeName=d67e4a55402e756819e4e8a994aa3c46/A, priority=13, startTime=1734020896847; duration=0sec 2024-12-12T16:28:17,298 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:28:17,298 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d67e4a55402e756819e4e8a994aa3c46:A 2024-12-12T16:28:17,298 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:28:17,300 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:28:17,300 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): d67e4a55402e756819e4e8a994aa3c46/C is initiating minor compaction (all files) 2024-12-12T16:28:17,300 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d67e4a55402e756819e4e8a994aa3c46/C in TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:17,300 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/927324f133744e73985e7d5e8f196cd3, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/e49356ac40364677969dbc411eed004b, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/f3fe504acb5c45d69774ada363464527] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp, totalSize=36.7 K 2024-12-12T16:28:17,301 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 927324f133744e73985e7d5e8f196cd3, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=271, earliestPutTs=1734020892561 2024-12-12T16:28:17,301 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/f3f6e19180cc402a97f57fd1fcadc313 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/f3f6e19180cc402a97f57fd1fcadc313 2024-12-12T16:28:17,302 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting e49356ac40364677969dbc411eed004b, 
keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=283, earliestPutTs=1734020893696 2024-12-12T16:28:17,302 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting f3fe504acb5c45d69774ada363464527, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1734020895871 2024-12-12T16:28:17,306 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d67e4a55402e756819e4e8a994aa3c46/B of d67e4a55402e756819e4e8a994aa3c46 into f3f6e19180cc402a97f57fd1fcadc313(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:28:17,306 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d67e4a55402e756819e4e8a994aa3c46: 2024-12-12T16:28:17,306 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46., storeName=d67e4a55402e756819e4e8a994aa3c46/B, priority=13, startTime=1734020896849; duration=0sec 2024-12-12T16:28:17,306 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:17,306 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d67e4a55402e756819e4e8a994aa3c46:B 2024-12-12T16:28:17,311 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d67e4a55402e756819e4e8a994aa3c46#C#compaction#276 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:28:17,311 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/5f98dee9aa8248b5ad449b0cf891e172 is 50, key is test_row_0/C:col10/1734020895979/Put/seqid=0 2024-12-12T16:28:17,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742148_1324 (size=13051) 2024-12-12T16:28:17,349 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/5f98dee9aa8248b5ad449b0cf891e172 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/5f98dee9aa8248b5ad449b0cf891e172 2024-12-12T16:28:17,355 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d67e4a55402e756819e4e8a994aa3c46/C of d67e4a55402e756819e4e8a994aa3c46 into 5f98dee9aa8248b5ad449b0cf891e172(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T16:28:17,355 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d67e4a55402e756819e4e8a994aa3c46: 2024-12-12T16:28:17,355 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46., storeName=d67e4a55402e756819e4e8a994aa3c46/C, priority=13, startTime=1734020896851; duration=0sec 2024-12-12T16:28:17,355 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:17,355 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d67e4a55402e756819e4e8a994aa3c46:C 2024-12-12T16:28:17,399 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=320 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/9a9cc7131208447c83b00dc491d9ea74 2024-12-12T16:28:17,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/4874c08b95594af3a141c75fe5813ab5 is 50, key is test_row_0/B:col10/1734020895995/Put/seqid=0 2024-12-12T16:28:17,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742149_1325 (size=12301) 2024-12-12T16:28:17,422 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=320 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/4874c08b95594af3a141c75fe5813ab5 2024-12-12T16:28:17,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/2dc397eb5094484d86723a4873fd1782 is 50, key is test_row_0/C:col10/1734020895995/Put/seqid=0 2024-12-12T16:28:17,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742150_1326 (size=12301) 2024-12-12T16:28:17,468 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=320 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/2dc397eb5094484d86723a4873fd1782 2024-12-12T16:28:17,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/9a9cc7131208447c83b00dc491d9ea74 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/9a9cc7131208447c83b00dc491d9ea74 2024-12-12T16:28:17,478 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:17,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55110 deadline: 1734020957478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:17,484 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/9a9cc7131208447c83b00dc491d9ea74, entries=150, sequenceid=320, filesize=12.0 K 2024-12-12T16:28:17,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/4874c08b95594af3a141c75fe5813ab5 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/4874c08b95594af3a141c75fe5813ab5 2024-12-12T16:28:17,491 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/4874c08b95594af3a141c75fe5813ab5, entries=150, sequenceid=320, filesize=12.0 K 2024-12-12T16:28:17,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/2dc397eb5094484d86723a4873fd1782 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/2dc397eb5094484d86723a4873fd1782 2024-12-12T16:28:17,497 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/2dc397eb5094484d86723a4873fd1782, entries=150, sequenceid=320, filesize=12.0 K 2024-12-12T16:28:17,499 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=161.02 KB/164880 for d67e4a55402e756819e4e8a994aa3c46 in 514ms, sequenceid=320, compaction requested=false 2024-12-12T16:28:17,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2538): Flush status journal for d67e4a55402e756819e4e8a994aa3c46: 2024-12-12T16:28:17,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 
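pid=86 above is the region-level subprocedure of a client-requested table flush (the master logs the originating call as "Client=jenkins//172.17.0.2 flush TestAcidGuarantees"). A minimal sketch of issuing that request yourself, assuming a reachable cluster; only the table name comes from the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public final class RequestFlush {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Admin.flush waits for the master-side FlushTableProcedure and its
                // per-region subprocedures (pid=85/86 in this log) to finish.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }

The client-side wait is what produces the later "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 85 completed" line on Thread-1178.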
2024-12-12T16:28:17,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=86 2024-12-12T16:28:17,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4106): Remote procedure done, pid=86 2024-12-12T16:28:17,504 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=86, resume processing ppid=85 2024-12-12T16:28:17,505 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, ppid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4350 sec 2024-12-12T16:28:17,507 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees in 1.4420 sec 2024-12-12T16:28:17,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on d67e4a55402e756819e4e8a994aa3c46 2024-12-12T16:28:17,782 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d67e4a55402e756819e4e8a994aa3c46 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-12-12T16:28:17,783 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=A 2024-12-12T16:28:17,783 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:17,783 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=B 2024-12-12T16:28:17,783 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:17,783 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=C 2024-12-12T16:28:17,783 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:17,787 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/7c58a326c9264deea76cb48cb4484a4c is 50, key is test_row_0/A:col10/1734020897165/Put/seqid=0 2024-12-12T16:28:17,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742151_1327 (size=12301) 2024-12-12T16:28:17,797 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:17,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55110 deadline: 1734020957795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:17,808 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-12T16:28:17,898 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:17,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55110 deadline: 1734020957898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:18,101 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:18,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55110 deadline: 1734020958100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:18,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-12T16:28:18,169 INFO [Thread-1178 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 85 completed 2024-12-12T16:28:18,170 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T16:28:18,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees 2024-12-12T16:28:18,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-12T16:28:18,173 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T16:28:18,174 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T16:28:18,174 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=88, ppid=87, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T16:28:18,192 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=351 
(bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/7c58a326c9264deea76cb48cb4484a4c 2024-12-12T16:28:18,199 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/33fb834fef674247bce473eee46de3b6 is 50, key is test_row_0/B:col10/1734020897165/Put/seqid=0 2024-12-12T16:28:18,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742152_1328 (size=12301) 2024-12-12T16:28:18,204 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=351 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/33fb834fef674247bce473eee46de3b6 2024-12-12T16:28:18,210 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/e9c9ecd3dcfe4233b83f9e674304a7c9 is 50, key is test_row_0/C:col10/1734020897165/Put/seqid=0 2024-12-12T16:28:18,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742153_1329 (size=12301) 2024-12-12T16:28:18,218 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=351 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/e9c9ecd3dcfe4233b83f9e674304a7c9 2024-12-12T16:28:18,223 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/7c58a326c9264deea76cb48cb4484a4c as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/7c58a326c9264deea76cb48cb4484a4c 2024-12-12T16:28:18,227 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/7c58a326c9264deea76cb48cb4484a4c, entries=150, sequenceid=351, filesize=12.0 K 2024-12-12T16:28:18,228 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/33fb834fef674247bce473eee46de3b6 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/33fb834fef674247bce473eee46de3b6 2024-12-12T16:28:18,231 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/33fb834fef674247bce473eee46de3b6, 
entries=150, sequenceid=351, filesize=12.0 K 2024-12-12T16:28:18,232 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/e9c9ecd3dcfe4233b83f9e674304a7c9 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/e9c9ecd3dcfe4233b83f9e674304a7c9 2024-12-12T16:28:18,237 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/e9c9ecd3dcfe4233b83f9e674304a7c9, entries=150, sequenceid=351, filesize=12.0 K 2024-12-12T16:28:18,238 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for d67e4a55402e756819e4e8a994aa3c46 in 455ms, sequenceid=351, compaction requested=true 2024-12-12T16:28:18,238 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d67e4a55402e756819e4e8a994aa3c46: 2024-12-12T16:28:18,238 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d67e4a55402e756819e4e8a994aa3c46:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T16:28:18,238 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:18,238 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d67e4a55402e756819e4e8a994aa3c46:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T16:28:18,238 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:18,238 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d67e4a55402e756819e4e8a994aa3c46:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T16:28:18,238 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:28:18,238 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:28:18,238 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:28:18,239 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:28:18,239 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): d67e4a55402e756819e4e8a994aa3c46/B is initiating minor compaction (all files) 2024-12-12T16:28:18,239 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d67e4a55402e756819e4e8a994aa3c46/B in 
TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:18,240 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/f3f6e19180cc402a97f57fd1fcadc313, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/4874c08b95594af3a141c75fe5813ab5, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/33fb834fef674247bce473eee46de3b6] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp, totalSize=36.8 K 2024-12-12T16:28:18,240 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:28:18,240 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): d67e4a55402e756819e4e8a994aa3c46/A is initiating minor compaction (all files) 2024-12-12T16:28:18,240 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d67e4a55402e756819e4e8a994aa3c46/A in TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:18,240 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/c8ecc55c9b1c4ee08623b775c9faaf2e, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/9a9cc7131208447c83b00dc491d9ea74, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/7c58a326c9264deea76cb48cb4484a4c] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp, totalSize=36.8 K 2024-12-12T16:28:18,240 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting f3f6e19180cc402a97f57fd1fcadc313, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1734020895871 2024-12-12T16:28:18,241 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting c8ecc55c9b1c4ee08623b775c9faaf2e, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1734020895871 2024-12-12T16:28:18,241 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 4874c08b95594af3a141c75fe5813ab5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=320, earliestPutTs=1734020895985 2024-12-12T16:28:18,241 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9a9cc7131208447c83b00dc491d9ea74, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=320, earliestPutTs=1734020895985 
2024-12-12T16:28:18,241 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 33fb834fef674247bce473eee46de3b6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=351, earliestPutTs=1734020897163 2024-12-12T16:28:18,242 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7c58a326c9264deea76cb48cb4484a4c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=351, earliestPutTs=1734020897163 2024-12-12T16:28:18,252 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d67e4a55402e756819e4e8a994aa3c46#B#compaction#282 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:28:18,252 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/d871c30a6a76410a8dcbc8a0266d55d8 is 50, key is test_row_0/B:col10/1734020897165/Put/seqid=0 2024-12-12T16:28:18,258 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d67e4a55402e756819e4e8a994aa3c46#A#compaction#283 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:28:18,258 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/e3de49636a894fb99dfe27b529f39c67 is 50, key is test_row_0/A:col10/1734020897165/Put/seqid=0 2024-12-12T16:28:18,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-12T16:28:18,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742154_1330 (size=13153) 2024-12-12T16:28:18,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742155_1331 (size=13153) 2024-12-12T16:28:18,325 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:18,326 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-12-12T16:28:18,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 
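The RegionTooBusyException warnings that recur through this stretch of the log ("Over memstore limit=512.0 K") are region-level write backpressure: HRegion.checkResources rejects new mutations once the region's memstore exceeds hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, and keeps rejecting them until a flush brings the size back down. The 512 K limit implies the test runs with a deliberately tiny flush size; the exact values are not visible in this excerpt, but a hypothetical combination that yields the same limit looks like this:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class TinyMemstoreConfig {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Hypothetical values: 128 KB flush size * multiplier 4 = 512 KB blocking limit,
            // matching the "Over memstore limit=512.0 K" warnings in the log.
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            long limit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 1);
            System.out.println("memstore blocking limit = " + limit + " bytes");
        }
    }

With the production defaults (128 MB flush size, multiplier 4) this limit would be 512 MB; the tiny value here simply forces frequent flush and compaction cycles while the test's concurrent readers and writers run.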
2024-12-12T16:28:18,326 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2837): Flushing d67e4a55402e756819e4e8a994aa3c46 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-12T16:28:18,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=A 2024-12-12T16:28:18,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:18,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=B 2024-12-12T16:28:18,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:18,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=C 2024-12-12T16:28:18,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:18,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/859bc7ea55b64513b04cc8960ead1fdd is 50, key is test_row_0/A:col10/1734020897794/Put/seqid=0 2024-12-12T16:28:18,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742156_1332 (size=12301) 2024-12-12T16:28:18,339 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=359 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/859bc7ea55b64513b04cc8960ead1fdd 2024-12-12T16:28:18,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/93095537adb04f688455177fa486b221 is 50, key is test_row_0/B:col10/1734020897794/Put/seqid=0 2024-12-12T16:28:18,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742157_1333 (size=12301) 2024-12-12T16:28:18,354 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=359 (bloomFilter=true), 
to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/93095537adb04f688455177fa486b221 2024-12-12T16:28:18,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/0a4910d10d2a49bcb12566ac6918189e is 50, key is test_row_0/C:col10/1734020897794/Put/seqid=0 2024-12-12T16:28:18,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742158_1334 (size=12301) 2024-12-12T16:28:18,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on d67e4a55402e756819e4e8a994aa3c46 2024-12-12T16:28:18,409 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. as already flushing 2024-12-12T16:28:18,466 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:18,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55110 deadline: 1734020958465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:18,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-12T16:28:18,569 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:18,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55110 deadline: 1734020958567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:18,682 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/d871c30a6a76410a8dcbc8a0266d55d8 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/d871c30a6a76410a8dcbc8a0266d55d8 2024-12-12T16:28:18,687 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d67e4a55402e756819e4e8a994aa3c46/B of d67e4a55402e756819e4e8a994aa3c46 into d871c30a6a76410a8dcbc8a0266d55d8(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
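The RegionTooBusyException entries above are thrown server-side in HRegion.checkResources when a put arrives while the region is over its memstore blocking limit (512 K in this test). A minimal client-side sketch that backs off and retries is shown below; table, row, family, and qualifier names are taken from the test for illustration, and in practice the HBase client's built-in retry logic usually absorbs this exception before the application sees it, so treat this as an assumption about explicit handling rather than the test's own code.

```java
// Illustrative sketch only: retrying a put when the region reports it is over its
// memstore limit. Assumes connection settings come from hbase-site.xml on the classpath.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithBackoff {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"))
                .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);
                    break; // write accepted
                } catch (RegionTooBusyException e) {
                    // Region is over its memstore limit; wait for flushes to drain it.
                    Thread.sleep(100L * attempt);
                }
            }
        }
    }
}
```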
2024-12-12T16:28:18,687 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d67e4a55402e756819e4e8a994aa3c46: 2024-12-12T16:28:18,687 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46., storeName=d67e4a55402e756819e4e8a994aa3c46/B, priority=13, startTime=1734020898238; duration=0sec 2024-12-12T16:28:18,687 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:28:18,687 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d67e4a55402e756819e4e8a994aa3c46:B 2024-12-12T16:28:18,687 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:28:18,688 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:28:18,688 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): d67e4a55402e756819e4e8a994aa3c46/C is initiating minor compaction (all files) 2024-12-12T16:28:18,688 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d67e4a55402e756819e4e8a994aa3c46/C in TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:18,688 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/5f98dee9aa8248b5ad449b0cf891e172, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/2dc397eb5094484d86723a4873fd1782, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/e9c9ecd3dcfe4233b83f9e674304a7c9] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp, totalSize=36.8 K 2024-12-12T16:28:18,689 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 5f98dee9aa8248b5ad449b0cf891e172, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1734020895871 2024-12-12T16:28:18,689 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 2dc397eb5094484d86723a4873fd1782, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=320, earliestPutTs=1734020895985 2024-12-12T16:28:18,689 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting e9c9ecd3dcfe4233b83f9e674304a7c9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=351, earliestPutTs=1734020897163 2024-12-12T16:28:18,696 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
d67e4a55402e756819e4e8a994aa3c46#C#compaction#287 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:28:18,696 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/4cdd70df7896414f90e504d1e53e03d8 is 50, key is test_row_0/C:col10/1734020897165/Put/seqid=0 2024-12-12T16:28:18,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742159_1335 (size=13153) 2024-12-12T16:28:18,703 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/e3de49636a894fb99dfe27b529f39c67 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/e3de49636a894fb99dfe27b529f39c67 2024-12-12T16:28:18,709 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d67e4a55402e756819e4e8a994aa3c46/A of d67e4a55402e756819e4e8a994aa3c46 into e3de49636a894fb99dfe27b529f39c67(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:28:18,709 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d67e4a55402e756819e4e8a994aa3c46: 2024-12-12T16:28:18,709 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46., storeName=d67e4a55402e756819e4e8a994aa3c46/A, priority=13, startTime=1734020898238; duration=0sec 2024-12-12T16:28:18,709 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:18,709 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d67e4a55402e756819e4e8a994aa3c46:A 2024-12-12T16:28:18,712 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/4cdd70df7896414f90e504d1e53e03d8 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/4cdd70df7896414f90e504d1e53e03d8 2024-12-12T16:28:18,716 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d67e4a55402e756819e4e8a994aa3c46/C of d67e4a55402e756819e4e8a994aa3c46 into 4cdd70df7896414f90e504d1e53e03d8(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T16:28:18,716 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d67e4a55402e756819e4e8a994aa3c46: 2024-12-12T16:28:18,716 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46., storeName=d67e4a55402e756819e4e8a994aa3c46/C, priority=13, startTime=1734020898238; duration=0sec 2024-12-12T16:28:18,716 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:18,716 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d67e4a55402e756819e4e8a994aa3c46:C 2024-12-12T16:28:18,766 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=359 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/0a4910d10d2a49bcb12566ac6918189e 2024-12-12T16:28:18,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/859bc7ea55b64513b04cc8960ead1fdd as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/859bc7ea55b64513b04cc8960ead1fdd 2024-12-12T16:28:18,771 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:18,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55110 deadline: 1734020958771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:18,775 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/859bc7ea55b64513b04cc8960ead1fdd, entries=150, sequenceid=359, filesize=12.0 K 2024-12-12T16:28:18,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-12T16:28:18,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/93095537adb04f688455177fa486b221 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/93095537adb04f688455177fa486b221 2024-12-12T16:28:18,779 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/93095537adb04f688455177fa486b221, entries=150, sequenceid=359, filesize=12.0 K 2024-12-12T16:28:18,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/0a4910d10d2a49bcb12566ac6918189e as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/0a4910d10d2a49bcb12566ac6918189e 2024-12-12T16:28:18,785 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/0a4910d10d2a49bcb12566ac6918189e, entries=150, sequenceid=359, filesize=12.0 K 2024-12-12T16:28:18,786 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] 
regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=167.72 KB/171750 for d67e4a55402e756819e4e8a994aa3c46 in 460ms, sequenceid=359, compaction requested=false 2024-12-12T16:28:18,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2538): Flush status journal for d67e4a55402e756819e4e8a994aa3c46: 2024-12-12T16:28:18,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:18,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=88 2024-12-12T16:28:18,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4106): Remote procedure done, pid=88 2024-12-12T16:28:18,790 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=88, resume processing ppid=87 2024-12-12T16:28:18,790 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, ppid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 615 msec 2024-12-12T16:28:18,792 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees in 621 msec 2024-12-12T16:28:19,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on d67e4a55402e756819e4e8a994aa3c46 2024-12-12T16:28:19,074 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d67e4a55402e756819e4e8a994aa3c46 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-12-12T16:28:19,075 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=A 2024-12-12T16:28:19,075 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:19,075 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=B 2024-12-12T16:28:19,075 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:19,075 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=C 2024-12-12T16:28:19,075 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:19,080 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/c7ef12427f894b66887fbd0f568fe84f is 50, key is test_row_0/A:col10/1734020899073/Put/seqid=0 2024-12-12T16:28:19,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742160_1336 (size=12301) 2024-12-12T16:28:19,088 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:19,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 257 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55110 deadline: 1734020959086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:19,190 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:19,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 259 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55110 deadline: 1734020959189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:19,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-12T16:28:19,276 INFO [Thread-1178 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 87 completed 2024-12-12T16:28:19,277 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T16:28:19,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=89, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees 2024-12-12T16:28:19,279 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=89, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T16:28:19,279 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=89, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T16:28:19,279 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T16:28:19,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-12T16:28:19,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-12T16:28:19,392 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:19,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 261 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55110 deadline: 1734020959391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:19,431 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:19,431 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-12-12T16:28:19,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:19,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. as already flushing 2024-12-12T16:28:19,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:19,432 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
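The "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" and "Operation: FLUSH ... procId: 87 completed" entries above correspond to administrative flush requests, which the master turns into the FlushTableProcedure/FlushRegionProcedure pairs (pid=87/88 and 89/90) seen in this log. A minimal sketch of issuing such a request from a client follows; it assumes quorum and connection settings are supplied via hbase-site.xml and is not the test's own driver code.

```java
// Illustrative sketch only: requesting a table flush through the Admin API, which
// maps to the master-side FlushTableProcedure seen in the log.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // Blocks until the flush procedure completes on the master.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```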
2024-12-12T16:28:19,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:19,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:19,486 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=391 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/c7ef12427f894b66887fbd0f568fe84f 2024-12-12T16:28:19,494 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/fb3ce461b4bd437d9a27230dbfe72485 is 50, key is test_row_0/B:col10/1734020899073/Put/seqid=0 2024-12-12T16:28:19,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742161_1337 (size=12301) 2024-12-12T16:28:19,499 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=391 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/fb3ce461b4bd437d9a27230dbfe72485 2024-12-12T16:28:19,506 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/409d902004b0483c96ed218ff4512220 is 50, key is test_row_0/C:col10/1734020899073/Put/seqid=0 2024-12-12T16:28:19,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742162_1338 (size=12301) 2024-12-12T16:28:19,515 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=391 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/409d902004b0483c96ed218ff4512220 2024-12-12T16:28:19,520 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/c7ef12427f894b66887fbd0f568fe84f as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/c7ef12427f894b66887fbd0f568fe84f 2024-12-12T16:28:19,524 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/c7ef12427f894b66887fbd0f568fe84f, entries=150, sequenceid=391, filesize=12.0 K 2024-12-12T16:28:19,525 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/fb3ce461b4bd437d9a27230dbfe72485 as 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/fb3ce461b4bd437d9a27230dbfe72485 2024-12-12T16:28:19,530 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/fb3ce461b4bd437d9a27230dbfe72485, entries=150, sequenceid=391, filesize=12.0 K 2024-12-12T16:28:19,531 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/409d902004b0483c96ed218ff4512220 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/409d902004b0483c96ed218ff4512220 2024-12-12T16:28:19,535 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/409d902004b0483c96ed218ff4512220, entries=150, sequenceid=391, filesize=12.0 K 2024-12-12T16:28:19,536 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=26.84 KB/27480 for d67e4a55402e756819e4e8a994aa3c46 in 461ms, sequenceid=391, compaction requested=true 2024-12-12T16:28:19,536 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d67e4a55402e756819e4e8a994aa3c46: 2024-12-12T16:28:19,536 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:28:19,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d67e4a55402e756819e4e8a994aa3c46:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T16:28:19,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:19,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d67e4a55402e756819e4e8a994aa3c46:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T16:28:19,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:19,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d67e4a55402e756819e4e8a994aa3c46:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T16:28:19,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:28:19,536 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:28:19,537 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction 
algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:28:19,537 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): d67e4a55402e756819e4e8a994aa3c46/B is initiating minor compaction (all files) 2024-12-12T16:28:19,538 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d67e4a55402e756819e4e8a994aa3c46/B in TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:19,538 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/d871c30a6a76410a8dcbc8a0266d55d8, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/93095537adb04f688455177fa486b221, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/fb3ce461b4bd437d9a27230dbfe72485] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp, totalSize=36.9 K 2024-12-12T16:28:19,538 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:28:19,538 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): d67e4a55402e756819e4e8a994aa3c46/A is initiating minor compaction (all files) 2024-12-12T16:28:19,538 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d67e4a55402e756819e4e8a994aa3c46/A in TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 
2024-12-12T16:28:19,538 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/e3de49636a894fb99dfe27b529f39c67, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/859bc7ea55b64513b04cc8960ead1fdd, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/c7ef12427f894b66887fbd0f568fe84f] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp, totalSize=36.9 K 2024-12-12T16:28:19,539 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting d871c30a6a76410a8dcbc8a0266d55d8, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=351, earliestPutTs=1734020897163 2024-12-12T16:28:19,539 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting e3de49636a894fb99dfe27b529f39c67, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=351, earliestPutTs=1734020897163 2024-12-12T16:28:19,539 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 93095537adb04f688455177fa486b221, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=359, earliestPutTs=1734020897787 2024-12-12T16:28:19,539 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 859bc7ea55b64513b04cc8960ead1fdd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=359, earliestPutTs=1734020897787 2024-12-12T16:28:19,539 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting fb3ce461b4bd437d9a27230dbfe72485, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=391, earliestPutTs=1734020898459 2024-12-12T16:28:19,540 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting c7ef12427f894b66887fbd0f568fe84f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=391, earliestPutTs=1734020898459 2024-12-12T16:28:19,550 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d67e4a55402e756819e4e8a994aa3c46#B#compaction#291 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:28:19,550 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/37024600112640c9b2da1305037efe65 is 50, key is test_row_0/B:col10/1734020899073/Put/seqid=0 2024-12-12T16:28:19,558 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d67e4a55402e756819e4e8a994aa3c46#A#compaction#292 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:28:19,558 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/66b0e95c5dec41208ab2ea4babcd77b4 is 50, key is test_row_0/A:col10/1734020899073/Put/seqid=0 2024-12-12T16:28:19,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742163_1339 (size=13255) 2024-12-12T16:28:19,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-12T16:28:19,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742164_1340 (size=13255) 2024-12-12T16:28:19,584 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:19,585 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-12-12T16:28:19,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:19,585 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2837): Flushing d67e4a55402e756819e4e8a994aa3c46 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-12-12T16:28:19,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=A 2024-12-12T16:28:19,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:19,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=B 2024-12-12T16:28:19,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:19,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=C 2024-12-12T16:28:19,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:19,588 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/37024600112640c9b2da1305037efe65 as 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/37024600112640c9b2da1305037efe65 2024-12-12T16:28:19,590 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/66b0e95c5dec41208ab2ea4babcd77b4 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/66b0e95c5dec41208ab2ea4babcd77b4 2024-12-12T16:28:19,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/8f2d6522e2a84fbeb752c55661c259bd is 50, key is test_row_1/A:col10/1734020899078/Put/seqid=0 2024-12-12T16:28:19,597 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d67e4a55402e756819e4e8a994aa3c46/B of d67e4a55402e756819e4e8a994aa3c46 into 37024600112640c9b2da1305037efe65(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:28:19,597 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d67e4a55402e756819e4e8a994aa3c46: 2024-12-12T16:28:19,597 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46., storeName=d67e4a55402e756819e4e8a994aa3c46/B, priority=13, startTime=1734020899536; duration=0sec 2024-12-12T16:28:19,597 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:28:19,597 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d67e4a55402e756819e4e8a994aa3c46:B 2024-12-12T16:28:19,597 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:28:19,598 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d67e4a55402e756819e4e8a994aa3c46/A of d67e4a55402e756819e4e8a994aa3c46 into 66b0e95c5dec41208ab2ea4babcd77b4(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
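The repeated "Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking" entries above reflect store-level compaction policy settings that ExploringCompactionPolicy evaluates. A sketch of the commonly used keys is below; the values shown are assumed defaults (the 3-file minimum and 16-file blocking threshold happen to match the figures in this log) and are not read from this test's actual configuration.

```java
// Illustrative sketch only: the store-file thresholds that drive minor compaction
// selection and write blocking. Key names are the standard ones; values are assumed defaults.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionPolicyConfig {
    public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);        // minimum files per minor compaction
        conf.setInt("hbase.hstore.compaction.max", 10);       // maximum files per minor compaction
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);   // writes block above this many files
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f); // size ratio used by ExploringCompactionPolicy
        return conf;
    }
}
```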
2024-12-12T16:28:19,598 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d67e4a55402e756819e4e8a994aa3c46: 2024-12-12T16:28:19,598 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46., storeName=d67e4a55402e756819e4e8a994aa3c46/A, priority=13, startTime=1734020899536; duration=0sec 2024-12-12T16:28:19,598 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:19,598 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d67e4a55402e756819e4e8a994aa3c46:A 2024-12-12T16:28:19,599 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:28:19,599 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): d67e4a55402e756819e4e8a994aa3c46/C is initiating minor compaction (all files) 2024-12-12T16:28:19,599 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d67e4a55402e756819e4e8a994aa3c46/C in TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:19,599 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/4cdd70df7896414f90e504d1e53e03d8, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/0a4910d10d2a49bcb12566ac6918189e, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/409d902004b0483c96ed218ff4512220] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp, totalSize=36.9 K 2024-12-12T16:28:19,599 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 4cdd70df7896414f90e504d1e53e03d8, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=351, earliestPutTs=1734020897163 2024-12-12T16:28:19,600 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 0a4910d10d2a49bcb12566ac6918189e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=359, earliestPutTs=1734020897787 2024-12-12T16:28:19,600 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 409d902004b0483c96ed218ff4512220, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=391, earliestPutTs=1734020898459 2024-12-12T16:28:19,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742165_1341 (size=9857) 2024-12-12T16:28:19,611 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
d67e4a55402e756819e4e8a994aa3c46#C#compaction#294 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:28:19,611 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/447a1f5bdd634757a17970ec64f514e9 is 50, key is test_row_0/C:col10/1734020899073/Put/seqid=0 2024-12-12T16:28:19,611 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=398 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/8f2d6522e2a84fbeb752c55661c259bd 2024-12-12T16:28:19,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742166_1342 (size=13255) 2024-12-12T16:28:19,626 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/447a1f5bdd634757a17970ec64f514e9 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/447a1f5bdd634757a17970ec64f514e9 2024-12-12T16:28:19,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/70eab815ff1844d38bd738abc8e5c120 is 50, key is test_row_1/B:col10/1734020899078/Put/seqid=0 2024-12-12T16:28:19,635 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d67e4a55402e756819e4e8a994aa3c46/C of d67e4a55402e756819e4e8a994aa3c46 into 447a1f5bdd634757a17970ec64f514e9(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:28:19,635 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d67e4a55402e756819e4e8a994aa3c46: 2024-12-12T16:28:19,635 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46., storeName=d67e4a55402e756819e4e8a994aa3c46/C, priority=13, startTime=1734020899536; duration=0sec 2024-12-12T16:28:19,636 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:19,636 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d67e4a55402e756819e4e8a994aa3c46:C 2024-12-12T16:28:19,650 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 
as already flushing 2024-12-12T16:28:19,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on d67e4a55402e756819e4e8a994aa3c46 2024-12-12T16:28:19,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742167_1343 (size=9857) 2024-12-12T16:28:19,689 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:19,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55096 deadline: 1734020959688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:19,690 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:19,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55052 deadline: 1734020959688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:19,695 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:19,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 263 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55110 deadline: 1734020959694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:19,791 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:19,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55096 deadline: 1734020959791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:19,792 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:19,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55052 deadline: 1734020959791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:19,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-12T16:28:19,994 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:19,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55096 deadline: 1734020959993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:19,994 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:19,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55052 deadline: 1734020959993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:20,067 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=398 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/70eab815ff1844d38bd738abc8e5c120 2024-12-12T16:28:20,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/4c8bdd3f8cdf4c93be09074430b74658 is 50, key is test_row_1/C:col10/1734020899078/Put/seqid=0 2024-12-12T16:28:20,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742168_1344 (size=9857) 2024-12-12T16:28:20,079 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=398 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/4c8bdd3f8cdf4c93be09074430b74658 2024-12-12T16:28:20,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/8f2d6522e2a84fbeb752c55661c259bd as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/8f2d6522e2a84fbeb752c55661c259bd 2024-12-12T16:28:20,088 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/8f2d6522e2a84fbeb752c55661c259bd, entries=100, sequenceid=398, filesize=9.6 K 2024-12-12T16:28:20,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/70eab815ff1844d38bd738abc8e5c120 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/70eab815ff1844d38bd738abc8e5c120 2024-12-12T16:28:20,095 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/70eab815ff1844d38bd738abc8e5c120, entries=100, sequenceid=398, filesize=9.6 K 2024-12-12T16:28:20,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/4c8bdd3f8cdf4c93be09074430b74658 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/4c8bdd3f8cdf4c93be09074430b74658 2024-12-12T16:28:20,100 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/4c8bdd3f8cdf4c93be09074430b74658, entries=100, sequenceid=398, filesize=9.6 K 2024-12-12T16:28:20,101 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=174.43 KB/178620 for d67e4a55402e756819e4e8a994aa3c46 in 516ms, sequenceid=398, compaction requested=false 2024-12-12T16:28:20,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2538): Flush status journal for d67e4a55402e756819e4e8a994aa3c46: 2024-12-12T16:28:20,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 
2024-12-12T16:28:20,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=90 2024-12-12T16:28:20,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4106): Remote procedure done, pid=90 2024-12-12T16:28:20,105 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=90, resume processing ppid=89 2024-12-12T16:28:20,105 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, ppid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 824 msec 2024-12-12T16:28:20,107 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees in 829 msec 2024-12-12T16:28:20,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on d67e4a55402e756819e4e8a994aa3c46 2024-12-12T16:28:20,201 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d67e4a55402e756819e4e8a994aa3c46 3/3 column families, dataSize=181.14 KB heapSize=475.36 KB 2024-12-12T16:28:20,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=A 2024-12-12T16:28:20,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:20,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=B 2024-12-12T16:28:20,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:20,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=C 2024-12-12T16:28:20,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:20,207 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/edd239db72cb46c4a418fc1d7ce15d70 is 50, key is test_row_0/A:col10/1734020899687/Put/seqid=0 2024-12-12T16:28:20,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742169_1345 (size=12301) 2024-12-12T16:28:20,211 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=431 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/edd239db72cb46c4a418fc1d7ce15d70 2024-12-12T16:28:20,211 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:20,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 269 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55110 deadline: 1734020960210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:20,218 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/499f7d8cbb0044c694cc467dc8b71659 is 50, key is test_row_0/B:col10/1734020899687/Put/seqid=0 2024-12-12T16:28:20,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742170_1346 (size=12301) 2024-12-12T16:28:20,297 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:20,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55052 deadline: 1734020960296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:20,298 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:20,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55096 deadline: 1734020960297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:20,313 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:20,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 271 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55110 deadline: 1734020960313, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:20,328 DEBUG [Thread-1187 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x56e9a678 to 127.0.0.1:52684 2024-12-12T16:28:20,328 DEBUG [Thread-1187 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:28:20,328 DEBUG [Thread-1185 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4b9e2976 to 127.0.0.1:52684 2024-12-12T16:28:20,328 DEBUG [Thread-1185 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:28:20,329 DEBUG [Thread-1183 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x328852db to 127.0.0.1:52684 2024-12-12T16:28:20,329 DEBUG [Thread-1183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:28:20,330 DEBUG [Thread-1179 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x131ceb8f to 127.0.0.1:52684 2024-12-12T16:28:20,330 DEBUG [Thread-1179 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:28:20,330 DEBUG [Thread-1181 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5a78bf6d to 127.0.0.1:52684 2024-12-12T16:28:20,330 DEBUG [Thread-1181 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:28:20,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-12T16:28:20,383 INFO [Thread-1178 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 89 completed 2024-12-12T16:28:20,515 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:20,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 273 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55110 deadline: 1734020960515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:20,622 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=431 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/499f7d8cbb0044c694cc467dc8b71659 2024-12-12T16:28:20,629 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/4878dd8a5c68414aa803593ce9e302b3 is 50, key is test_row_0/C:col10/1734020899687/Put/seqid=0 2024-12-12T16:28:20,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742171_1347 (size=12301) 2024-12-12T16:28:20,800 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:20,800 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:20,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55096 deadline: 1734020960800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:20,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55052 deadline: 1734020960800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:20,817 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:20,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 275 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55110 deadline: 1734020960817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:21,032 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=431 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/4878dd8a5c68414aa803593ce9e302b3 2024-12-12T16:28:21,036 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/edd239db72cb46c4a418fc1d7ce15d70 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/edd239db72cb46c4a418fc1d7ce15d70 2024-12-12T16:28:21,039 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/edd239db72cb46c4a418fc1d7ce15d70, entries=150, sequenceid=431, filesize=12.0 K 2024-12-12T16:28:21,039 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/499f7d8cbb0044c694cc467dc8b71659 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/499f7d8cbb0044c694cc467dc8b71659 2024-12-12T16:28:21,042 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/499f7d8cbb0044c694cc467dc8b71659, entries=150, sequenceid=431, filesize=12.0 K 2024-12-12T16:28:21,042 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/4878dd8a5c68414aa803593ce9e302b3 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/4878dd8a5c68414aa803593ce9e302b3 2024-12-12T16:28:21,045 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/4878dd8a5c68414aa803593ce9e302b3, entries=150, sequenceid=431, filesize=12.0 K 2024-12-12T16:28:21,046 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~181.14 KB/185490, heapSize ~475.31 KB/486720, currentSize=20.13 KB/20610 for d67e4a55402e756819e4e8a994aa3c46 in 845ms, sequenceid=431, compaction requested=true 2024-12-12T16:28:21,046 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d67e4a55402e756819e4e8a994aa3c46: 2024-12-12T16:28:21,046 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d67e4a55402e756819e4e8a994aa3c46:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T16:28:21,046 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:21,046 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:28:21,046 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d67e4a55402e756819e4e8a994aa3c46:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T16:28:21,046 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:21,046 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:28:21,046 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d67e4a55402e756819e4e8a994aa3c46:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T16:28:21,046 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:28:21,047 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35413 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:28:21,047 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): d67e4a55402e756819e4e8a994aa3c46/A is initiating minor compaction (all files) 2024-12-12T16:28:21,047 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35413 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:28:21,047 INFO 
[RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d67e4a55402e756819e4e8a994aa3c46/A in TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:21,047 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): d67e4a55402e756819e4e8a994aa3c46/B is initiating minor compaction (all files) 2024-12-12T16:28:21,047 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/66b0e95c5dec41208ab2ea4babcd77b4, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/8f2d6522e2a84fbeb752c55661c259bd, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/edd239db72cb46c4a418fc1d7ce15d70] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp, totalSize=34.6 K 2024-12-12T16:28:21,047 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d67e4a55402e756819e4e8a994aa3c46/B in TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:21,047 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/37024600112640c9b2da1305037efe65, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/70eab815ff1844d38bd738abc8e5c120, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/499f7d8cbb0044c694cc467dc8b71659] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp, totalSize=34.6 K 2024-12-12T16:28:21,048 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 66b0e95c5dec41208ab2ea4babcd77b4, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=391, earliestPutTs=1734020898459 2024-12-12T16:28:21,048 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 37024600112640c9b2da1305037efe65, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=391, earliestPutTs=1734020898459 2024-12-12T16:28:21,048 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8f2d6522e2a84fbeb752c55661c259bd, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=398, earliestPutTs=1734020899078 2024-12-12T16:28:21,048 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 70eab815ff1844d38bd738abc8e5c120, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=398, earliestPutTs=1734020899078 2024-12-12T16:28:21,048 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 
edd239db72cb46c4a418fc1d7ce15d70, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=431, earliestPutTs=1734020899686 2024-12-12T16:28:21,048 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 499f7d8cbb0044c694cc467dc8b71659, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=431, earliestPutTs=1734020899686 2024-12-12T16:28:21,053 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d67e4a55402e756819e4e8a994aa3c46#A#compaction#300 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:28:21,053 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d67e4a55402e756819e4e8a994aa3c46#B#compaction#301 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:28:21,054 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/0e9b1368d8df40429edc3f39236de2d8 is 50, key is test_row_0/A:col10/1734020899687/Put/seqid=0 2024-12-12T16:28:21,054 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/ba42f27311634018921728c6d2a0a66c is 50, key is test_row_0/B:col10/1734020899687/Put/seqid=0 2024-12-12T16:28:21,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742172_1348 (size=13357) 2024-12-12T16:28:21,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742173_1349 (size=13357) 2024-12-12T16:28:21,320 DEBUG [Thread-1172 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x17cf7fc0 to 127.0.0.1:52684 2024-12-12T16:28:21,320 DEBUG [Thread-1172 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:28:21,462 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/0e9b1368d8df40429edc3f39236de2d8 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/0e9b1368d8df40429edc3f39236de2d8 2024-12-12T16:28:21,462 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/ba42f27311634018921728c6d2a0a66c as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/ba42f27311634018921728c6d2a0a66c 2024-12-12T16:28:21,466 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 
d67e4a55402e756819e4e8a994aa3c46/A of d67e4a55402e756819e4e8a994aa3c46 into 0e9b1368d8df40429edc3f39236de2d8(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:28:21,466 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d67e4a55402e756819e4e8a994aa3c46: 2024-12-12T16:28:21,466 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46., storeName=d67e4a55402e756819e4e8a994aa3c46/A, priority=13, startTime=1734020901046; duration=0sec 2024-12-12T16:28:21,466 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:28:21,466 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d67e4a55402e756819e4e8a994aa3c46:A 2024-12-12T16:28:21,466 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d67e4a55402e756819e4e8a994aa3c46/B of d67e4a55402e756819e4e8a994aa3c46 into ba42f27311634018921728c6d2a0a66c(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:28:21,466 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:28:21,466 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d67e4a55402e756819e4e8a994aa3c46: 2024-12-12T16:28:21,466 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46., storeName=d67e4a55402e756819e4e8a994aa3c46/B, priority=13, startTime=1734020901046; duration=0sec 2024-12-12T16:28:21,466 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:21,466 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d67e4a55402e756819e4e8a994aa3c46:B 2024-12-12T16:28:21,467 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35413 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:28:21,467 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): d67e4a55402e756819e4e8a994aa3c46/C is initiating minor compaction (all files) 2024-12-12T16:28:21,467 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d67e4a55402e756819e4e8a994aa3c46/C in TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 
2024-12-12T16:28:21,467 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/447a1f5bdd634757a17970ec64f514e9, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/4c8bdd3f8cdf4c93be09074430b74658, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/4878dd8a5c68414aa803593ce9e302b3] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp, totalSize=34.6 K 2024-12-12T16:28:21,467 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 447a1f5bdd634757a17970ec64f514e9, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=391, earliestPutTs=1734020898459 2024-12-12T16:28:21,467 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4c8bdd3f8cdf4c93be09074430b74658, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=398, earliestPutTs=1734020899078 2024-12-12T16:28:21,468 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4878dd8a5c68414aa803593ce9e302b3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=431, earliestPutTs=1734020899686 2024-12-12T16:28:21,473 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d67e4a55402e756819e4e8a994aa3c46#C#compaction#302 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:28:21,473 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/25755eafcd15492bab9372061f01fc41 is 50, key is test_row_0/C:col10/1734020899687/Put/seqid=0 2024-12-12T16:28:21,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742174_1350 (size=13357) 2024-12-12T16:28:21,804 DEBUG [Thread-1168 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4ec09297 to 127.0.0.1:52684 2024-12-12T16:28:21,804 DEBUG [Thread-1174 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x78b04266 to 127.0.0.1:52684 2024-12-12T16:28:21,804 DEBUG [Thread-1168 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:28:21,804 DEBUG [Thread-1174 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:28:21,882 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/25755eafcd15492bab9372061f01fc41 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/25755eafcd15492bab9372061f01fc41 2024-12-12T16:28:21,887 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d67e4a55402e756819e4e8a994aa3c46/C of d67e4a55402e756819e4e8a994aa3c46 into 25755eafcd15492bab9372061f01fc41(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T16:28:21,887 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d67e4a55402e756819e4e8a994aa3c46: 2024-12-12T16:28:21,887 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46., storeName=d67e4a55402e756819e4e8a994aa3c46/C, priority=13, startTime=1734020901046; duration=0sec 2024-12-12T16:28:21,887 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:21,887 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d67e4a55402e756819e4e8a994aa3c46:C 2024-12-12T16:28:25,923 DEBUG [Thread-1176 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x088aa519 to 127.0.0.1:52684 2024-12-12T16:28:25,923 DEBUG [Thread-1176 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:28:25,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on d67e4a55402e756819e4e8a994aa3c46 2024-12-12T16:28:25,972 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d67e4a55402e756819e4e8a994aa3c46 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T16:28:25,972 DEBUG [Thread-1170 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4dfb20f6 to 127.0.0.1:52684 2024-12-12T16:28:25,973 DEBUG [Thread-1170 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:28:25,973 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=A 2024-12-12T16:28:25,973 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:25,973 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-12T16:28:25,973 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 28 2024-12-12T16:28:25,973 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=B 2024-12-12T16:28:25,973 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 75 2024-12-12T16:28:25,973 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 170 2024-12-12T16:28:25,973 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 22 2024-12-12T16:28:25,973 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 50 2024-12-12T16:28:25,973 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:25,973 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-12T16:28:25,973 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7403 2024-12-12T16:28:25,973 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d67e4a55402e756819e4e8a994aa3c46, store=C 2024-12-12T16:28:25,973 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7207 2024-12-12T16:28:25,973 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7044 2024-12-12T16:28:25,973 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7379 2024-12-12T16:28:25,973 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:25,973 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7188 2024-12-12T16:28:25,973 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-12T16:28:25,973 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-12T16:28:25,973 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x72aa9ee5 to 127.0.0.1:52684 2024-12-12T16:28:25,973 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:28:25,974 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-12T16:28:25,974 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-12T16:28:25,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=91, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-12T16:28:25,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-12T16:28:25,977 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734020905977"}]},"ts":"1734020905977"} 2024-12-12T16:28:25,978 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/8422a16816bb448cbbde724f8cd37b0e is 50, key is test_row_0/A:col10/1734020901802/Put/seqid=0 2024-12-12T16:28:25,978 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 
2024-12-12T16:28:25,980 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-12T16:28:25,981 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=92, ppid=91, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-12T16:28:25,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742175_1351 (size=9857) 2024-12-12T16:28:25,983 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=93, ppid=92, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d67e4a55402e756819e4e8a994aa3c46, UNASSIGN}] 2024-12-12T16:28:25,983 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=93, ppid=92, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d67e4a55402e756819e4e8a994aa3c46, UNASSIGN 2024-12-12T16:28:25,984 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=93 updating hbase:meta row=d67e4a55402e756819e4e8a994aa3c46, regionState=CLOSING, regionLocation=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:25,985 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T16:28:25,985 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=94, ppid=93, state=RUNNABLE; CloseRegionProcedure d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476}] 2024-12-12T16:28:26,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-12T16:28:26,136 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:26,137 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(124): Close d67e4a55402e756819e4e8a994aa3c46 2024-12-12T16:28:26,137 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T16:28:26,137 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1681): Closing d67e4a55402e756819e4e8a994aa3c46, disabling compactions & flushes 2024-12-12T16:28:26,137 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1942): waiting for 0 compactions & cache flush to complete for region TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 
2024-12-12T16:28:26,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-12T16:28:26,383 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=445 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/8422a16816bb448cbbde724f8cd37b0e 2024-12-12T16:28:26,389 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/a33259af5fa24bc89541d1041af72c1c is 50, key is test_row_0/B:col10/1734020901802/Put/seqid=0 2024-12-12T16:28:26,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742176_1352 (size=9857) 2024-12-12T16:28:26,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-12T16:28:26,794 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=445 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/a33259af5fa24bc89541d1041af72c1c 2024-12-12T16:28:26,800 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/6c20a52b6d6347ebae55d975439ad38b is 50, key is test_row_0/C:col10/1734020901802/Put/seqid=0 2024-12-12T16:28:26,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742177_1353 (size=9857) 2024-12-12T16:28:27,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-12T16:28:27,205 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=445 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/6c20a52b6d6347ebae55d975439ad38b 2024-12-12T16:28:27,208 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/A/8422a16816bb448cbbde724f8cd37b0e as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/8422a16816bb448cbbde724f8cd37b0e 2024-12-12T16:28:27,211 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/8422a16816bb448cbbde724f8cd37b0e, entries=100, sequenceid=445, filesize=9.6 K 2024-12-12T16:28:27,212 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/B/a33259af5fa24bc89541d1041af72c1c as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/a33259af5fa24bc89541d1041af72c1c 2024-12-12T16:28:27,215 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/a33259af5fa24bc89541d1041af72c1c, entries=100, sequenceid=445, filesize=9.6 K 2024-12-12T16:28:27,216 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/.tmp/C/6c20a52b6d6347ebae55d975439ad38b as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/6c20a52b6d6347ebae55d975439ad38b 2024-12-12T16:28:27,219 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/6c20a52b6d6347ebae55d975439ad38b, entries=100, sequenceid=445, filesize=9.6 K 2024-12-12T16:28:27,219 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=0 B/0 for d67e4a55402e756819e4e8a994aa3c46 in 1247ms, sequenceid=445, compaction requested=false 2024-12-12T16:28:27,219 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d67e4a55402e756819e4e8a994aa3c46: 2024-12-12T16:28:27,219 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:27,219 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 2024-12-12T16:28:27,219 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. after waiting 0 ms 2024-12-12T16:28:27,220 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 
2024-12-12T16:28:27,220 DEBUG [StoreCloser-TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/71a9db9776274381ba885bc1033bcbac, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/5b1d1adb837145258647a786077b9038, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/698e99fbf5f34ce282bda410d0e60fe1, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/820d83d7d0b04bea9d638bc2040c1e44, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/1a4afef5f32f49ac97fb36edb4456f88, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/5d4dd3ce01f547a7bb1117d1b5e3f035, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/5cc8b925372146ec83f9c2ca14ff0650, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/d1e52be9744e44ecb0a55e39a4304ea4, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/8ce4082439934c2f91ad2e3ea9786417, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/c7931ad908e74f3e95051c8ba9b7f8e3, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/c52b5a51f1e9434dab076c2de9466afc, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/f85013b9c2364097b7cf41dcb7cd591e, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/69eef87ce6a845d3872f1f7f5e66fd8b, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/c77337b0112d48cf947e58271be2a8b0, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/8be73b53c4a94bd491548350b9952686, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/f6bb45c48d3841b3ada8e61f2890f540, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/b39cb6b0ea2f42fabc2aff3a5499a725, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/634affaea444429d94aa0cd489dde909, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/6285c43c28824a01b04c66e9ffba8876, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/983be55cc8044e54bb7437bfa73652f6, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/19dfff15fffd4e2c88945f7346805f98, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/c8ecc55c9b1c4ee08623b775c9faaf2e, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/da9afb064eb24862b7dfbff2a6ec5d2d, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/9a9cc7131208447c83b00dc491d9ea74, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/e3de49636a894fb99dfe27b529f39c67, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/7c58a326c9264deea76cb48cb4484a4c, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/859bc7ea55b64513b04cc8960ead1fdd, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/66b0e95c5dec41208ab2ea4babcd77b4, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/c7ef12427f894b66887fbd0f568fe84f, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/8f2d6522e2a84fbeb752c55661c259bd, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/edd239db72cb46c4a418fc1d7ce15d70] to archive 2024-12-12T16:28:27,221 DEBUG [StoreCloser-TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-12T16:28:27,223 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/820d83d7d0b04bea9d638bc2040c1e44 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/820d83d7d0b04bea9d638bc2040c1e44 2024-12-12T16:28:27,223 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/5b1d1adb837145258647a786077b9038 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/5b1d1adb837145258647a786077b9038 2024-12-12T16:28:27,224 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/71a9db9776274381ba885bc1033bcbac to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/71a9db9776274381ba885bc1033bcbac 2024-12-12T16:28:27,224 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/698e99fbf5f34ce282bda410d0e60fe1 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/698e99fbf5f34ce282bda410d0e60fe1 2024-12-12T16:28:27,224 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/1a4afef5f32f49ac97fb36edb4456f88 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/1a4afef5f32f49ac97fb36edb4456f88 2024-12-12T16:28:27,224 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/5d4dd3ce01f547a7bb1117d1b5e3f035 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/5d4dd3ce01f547a7bb1117d1b5e3f035 2024-12-12T16:28:27,224 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/5cc8b925372146ec83f9c2ca14ff0650 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/5cc8b925372146ec83f9c2ca14ff0650 2024-12-12T16:28:27,224 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/d1e52be9744e44ecb0a55e39a4304ea4 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/d1e52be9744e44ecb0a55e39a4304ea4 2024-12-12T16:28:27,225 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/8ce4082439934c2f91ad2e3ea9786417 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/8ce4082439934c2f91ad2e3ea9786417 2024-12-12T16:28:27,226 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/c7931ad908e74f3e95051c8ba9b7f8e3 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/c7931ad908e74f3e95051c8ba9b7f8e3 2024-12-12T16:28:27,226 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/8be73b53c4a94bd491548350b9952686 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/8be73b53c4a94bd491548350b9952686 2024-12-12T16:28:27,226 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/c52b5a51f1e9434dab076c2de9466afc to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/c52b5a51f1e9434dab076c2de9466afc 2024-12-12T16:28:27,226 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/c77337b0112d48cf947e58271be2a8b0 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/c77337b0112d48cf947e58271be2a8b0 2024-12-12T16:28:27,227 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/69eef87ce6a845d3872f1f7f5e66fd8b to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/69eef87ce6a845d3872f1f7f5e66fd8b 2024-12-12T16:28:27,227 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/f85013b9c2364097b7cf41dcb7cd591e to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/f85013b9c2364097b7cf41dcb7cd591e 2024-12-12T16:28:27,227 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/f6bb45c48d3841b3ada8e61f2890f540 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/f6bb45c48d3841b3ada8e61f2890f540 2024-12-12T16:28:27,228 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/634affaea444429d94aa0cd489dde909 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/634affaea444429d94aa0cd489dde909 2024-12-12T16:28:27,228 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/b39cb6b0ea2f42fabc2aff3a5499a725 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/b39cb6b0ea2f42fabc2aff3a5499a725 2024-12-12T16:28:27,228 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/19dfff15fffd4e2c88945f7346805f98 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/19dfff15fffd4e2c88945f7346805f98 2024-12-12T16:28:27,229 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/6285c43c28824a01b04c66e9ffba8876 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/6285c43c28824a01b04c66e9ffba8876 2024-12-12T16:28:27,229 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/983be55cc8044e54bb7437bfa73652f6 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/983be55cc8044e54bb7437bfa73652f6 2024-12-12T16:28:27,229 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/c8ecc55c9b1c4ee08623b775c9faaf2e to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/c8ecc55c9b1c4ee08623b775c9faaf2e 2024-12-12T16:28:27,229 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/9a9cc7131208447c83b00dc491d9ea74 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/9a9cc7131208447c83b00dc491d9ea74 2024-12-12T16:28:27,229 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/da9afb064eb24862b7dfbff2a6ec5d2d to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/da9afb064eb24862b7dfbff2a6ec5d2d 2024-12-12T16:28:27,230 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/e3de49636a894fb99dfe27b529f39c67 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/e3de49636a894fb99dfe27b529f39c67 2024-12-12T16:28:27,230 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/7c58a326c9264deea76cb48cb4484a4c to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/7c58a326c9264deea76cb48cb4484a4c 2024-12-12T16:28:27,231 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/859bc7ea55b64513b04cc8960ead1fdd to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/859bc7ea55b64513b04cc8960ead1fdd 2024-12-12T16:28:27,231 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/8f2d6522e2a84fbeb752c55661c259bd to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/8f2d6522e2a84fbeb752c55661c259bd 2024-12-12T16:28:27,231 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/c7ef12427f894b66887fbd0f568fe84f to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/c7ef12427f894b66887fbd0f568fe84f 2024-12-12T16:28:27,231 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/66b0e95c5dec41208ab2ea4babcd77b4 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/66b0e95c5dec41208ab2ea4babcd77b4 2024-12-12T16:28:27,231 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/edd239db72cb46c4a418fc1d7ce15d70 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/edd239db72cb46c4a418fc1d7ce15d70 2024-12-12T16:28:27,233 DEBUG [StoreCloser-TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/bd08930afd7e492bb1e03ba416f5dcdb, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/6495ab16f29c443f93649a24e74040a4, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/70e78de994ff44d6a7d6265d8c9c7999, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/4ef1bcd6c00144ec91a5ea709cbd33a5, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/e84be560e6644ce1ad0a074ad8c38ad9, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/8e504172c6104fe8bb7988430c8ed62a, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/ba98728bad2c46b0908c3ec33613d603, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/7038944329ed4b358c191a0f17e9c792, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/aac403d475804d0188c321599707f5ef, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/720b31a1b9c2426aa6abf73bcefb2521, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/222990dfb4f240daae23b501b70c4a3f, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/0fc3ed8d1ada478cb4ee43df814a8d8c, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/ad66f8b7b0e8417696e98c525ce4552f, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/267033eb2f324537b709b3d1723b820f, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/e607f6dddab94eca94fc3f7b41d69423, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/fea62d04bc4d4908b084d0af79e4b9f0, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/db1117c3b266422ebee0430788726f68, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/35da8f9f921d47deb9e88339f2a61df6, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/61c4f771fe15475aa1b0461662820506, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/91f67ac4367241e7b378c0ef9ec5b1b5, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/2c7c90f36dfb4116acdc98f6416046ee, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/f3f6e19180cc402a97f57fd1fcadc313, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/ee70cc5485cd4c2ea9d90c01dc96e427, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/4874c08b95594af3a141c75fe5813ab5, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/d871c30a6a76410a8dcbc8a0266d55d8, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/33fb834fef674247bce473eee46de3b6, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/93095537adb04f688455177fa486b221, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/37024600112640c9b2da1305037efe65, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/fb3ce461b4bd437d9a27230dbfe72485, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/70eab815ff1844d38bd738abc8e5c120, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/499f7d8cbb0044c694cc467dc8b71659] to archive 2024-12-12T16:28:27,234 DEBUG [StoreCloser-TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-12T16:28:27,236 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/4ef1bcd6c00144ec91a5ea709cbd33a5 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/4ef1bcd6c00144ec91a5ea709cbd33a5 2024-12-12T16:28:27,236 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/6495ab16f29c443f93649a24e74040a4 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/6495ab16f29c443f93649a24e74040a4 2024-12-12T16:28:27,237 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/70e78de994ff44d6a7d6265d8c9c7999 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/70e78de994ff44d6a7d6265d8c9c7999 2024-12-12T16:28:27,237 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/e84be560e6644ce1ad0a074ad8c38ad9 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/e84be560e6644ce1ad0a074ad8c38ad9 2024-12-12T16:28:27,237 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/bd08930afd7e492bb1e03ba416f5dcdb to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/bd08930afd7e492bb1e03ba416f5dcdb 2024-12-12T16:28:27,237 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/8e504172c6104fe8bb7988430c8ed62a to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/8e504172c6104fe8bb7988430c8ed62a 2024-12-12T16:28:27,237 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/ba98728bad2c46b0908c3ec33613d603 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/ba98728bad2c46b0908c3ec33613d603 2024-12-12T16:28:27,238 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/7038944329ed4b358c191a0f17e9c792 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/7038944329ed4b358c191a0f17e9c792 2024-12-12T16:28:27,239 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/aac403d475804d0188c321599707f5ef to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/aac403d475804d0188c321599707f5ef 2024-12-12T16:28:27,239 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/222990dfb4f240daae23b501b70c4a3f to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/222990dfb4f240daae23b501b70c4a3f 2024-12-12T16:28:27,239 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/ad66f8b7b0e8417696e98c525ce4552f to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/ad66f8b7b0e8417696e98c525ce4552f 2024-12-12T16:28:27,239 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/720b31a1b9c2426aa6abf73bcefb2521 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/720b31a1b9c2426aa6abf73bcefb2521 2024-12-12T16:28:27,239 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/267033eb2f324537b709b3d1723b820f to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/267033eb2f324537b709b3d1723b820f 2024-12-12T16:28:27,240 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/0fc3ed8d1ada478cb4ee43df814a8d8c to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/0fc3ed8d1ada478cb4ee43df814a8d8c 2024-12-12T16:28:27,240 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/e607f6dddab94eca94fc3f7b41d69423 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/e607f6dddab94eca94fc3f7b41d69423 2024-12-12T16:28:27,240 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/fea62d04bc4d4908b084d0af79e4b9f0 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/fea62d04bc4d4908b084d0af79e4b9f0 2024-12-12T16:28:27,241 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/35da8f9f921d47deb9e88339f2a61df6 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/35da8f9f921d47deb9e88339f2a61df6 2024-12-12T16:28:27,241 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/db1117c3b266422ebee0430788726f68 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/db1117c3b266422ebee0430788726f68 2024-12-12T16:28:27,241 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/2c7c90f36dfb4116acdc98f6416046ee to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/2c7c90f36dfb4116acdc98f6416046ee 2024-12-12T16:28:27,241 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/61c4f771fe15475aa1b0461662820506 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/61c4f771fe15475aa1b0461662820506 2024-12-12T16:28:27,242 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/f3f6e19180cc402a97f57fd1fcadc313 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/f3f6e19180cc402a97f57fd1fcadc313 2024-12-12T16:28:27,242 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/4874c08b95594af3a141c75fe5813ab5 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/4874c08b95594af3a141c75fe5813ab5 2024-12-12T16:28:27,242 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/91f67ac4367241e7b378c0ef9ec5b1b5 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/91f67ac4367241e7b378c0ef9ec5b1b5 2024-12-12T16:28:27,242 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/ee70cc5485cd4c2ea9d90c01dc96e427 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/ee70cc5485cd4c2ea9d90c01dc96e427 2024-12-12T16:28:27,243 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/d871c30a6a76410a8dcbc8a0266d55d8 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/d871c30a6a76410a8dcbc8a0266d55d8 2024-12-12T16:28:27,243 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/93095537adb04f688455177fa486b221 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/93095537adb04f688455177fa486b221 2024-12-12T16:28:27,243 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/fb3ce461b4bd437d9a27230dbfe72485 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/fb3ce461b4bd437d9a27230dbfe72485 2024-12-12T16:28:27,243 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/70eab815ff1844d38bd738abc8e5c120 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/70eab815ff1844d38bd738abc8e5c120 2024-12-12T16:28:27,244 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/33fb834fef674247bce473eee46de3b6 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/33fb834fef674247bce473eee46de3b6 2024-12-12T16:28:27,244 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/37024600112640c9b2da1305037efe65 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/37024600112640c9b2da1305037efe65 2024-12-12T16:28:27,244 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/499f7d8cbb0044c694cc467dc8b71659 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/499f7d8cbb0044c694cc467dc8b71659 2024-12-12T16:28:27,245 DEBUG [StoreCloser-TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/75616725e61e412ba3997e35a8a1f610, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/ac26b4da84144feda68b3dc5b87cf883, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/03b7ed85d0aa4b76b6a66574d9eef649, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/6f14e45de1e04d7ebb8597edaacba141, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/ff8ca45548f247d1bd8c981b78bee8a9, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/6605ac8483d34b369a9ebd3840178d75, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/70552faf2cab4087bf0381beb58b6db0, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/39461245250343ad96fb91c423f3a375, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/65fc487e454d469abfec1aad0993d1ff, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/d7aaf0fa4a54429c88210a4774bedfd0, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/87d7fb1636b64489a9801da8871ec36b, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/10b0d8cb9ae749f49fa2f95d4124356e, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/8ec790e2cdb84b3eaa4ab69c0bd8193c, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/aaf44d78aa4f429c9d42bf21def57e38, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/b521c03e22fc4171a62e3263bfb7b757, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/ef8f17ba8d994921be3acb177715697e, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/82786d3ec8704ba0b92eff66192cf911, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/6817c5899c7b47039d86ad9726729e66, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/927324f133744e73985e7d5e8f196cd3, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/5005bb446a4a460ea3af5fbf2d93fec1, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/e49356ac40364677969dbc411eed004b, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/5f98dee9aa8248b5ad449b0cf891e172, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/f3fe504acb5c45d69774ada363464527, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/2dc397eb5094484d86723a4873fd1782, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/4cdd70df7896414f90e504d1e53e03d8, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/e9c9ecd3dcfe4233b83f9e674304a7c9, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/0a4910d10d2a49bcb12566ac6918189e, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/447a1f5bdd634757a17970ec64f514e9, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/409d902004b0483c96ed218ff4512220, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/4c8bdd3f8cdf4c93be09074430b74658, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/4878dd8a5c68414aa803593ce9e302b3] to archive 2024-12-12T16:28:27,246 DEBUG [StoreCloser-TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-12T16:28:27,248 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/75616725e61e412ba3997e35a8a1f610 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/75616725e61e412ba3997e35a8a1f610 2024-12-12T16:28:27,248 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/ac26b4da84144feda68b3dc5b87cf883 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/ac26b4da84144feda68b3dc5b87cf883 2024-12-12T16:28:27,248 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/03b7ed85d0aa4b76b6a66574d9eef649 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/03b7ed85d0aa4b76b6a66574d9eef649 2024-12-12T16:28:27,248 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/6f14e45de1e04d7ebb8597edaacba141 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/6f14e45de1e04d7ebb8597edaacba141 2024-12-12T16:28:27,248 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/6605ac8483d34b369a9ebd3840178d75 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/6605ac8483d34b369a9ebd3840178d75 2024-12-12T16:28:27,249 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/39461245250343ad96fb91c423f3a375 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/39461245250343ad96fb91c423f3a375 2024-12-12T16:28:27,249 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/ff8ca45548f247d1bd8c981b78bee8a9 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/ff8ca45548f247d1bd8c981b78bee8a9 2024-12-12T16:28:27,249 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/70552faf2cab4087bf0381beb58b6db0 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/70552faf2cab4087bf0381beb58b6db0 2024-12-12T16:28:27,250 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/65fc487e454d469abfec1aad0993d1ff to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/65fc487e454d469abfec1aad0993d1ff 2024-12-12T16:28:27,250 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/87d7fb1636b64489a9801da8871ec36b to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/87d7fb1636b64489a9801da8871ec36b 2024-12-12T16:28:27,250 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/8ec790e2cdb84b3eaa4ab69c0bd8193c to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/8ec790e2cdb84b3eaa4ab69c0bd8193c 2024-12-12T16:28:27,250 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/d7aaf0fa4a54429c88210a4774bedfd0 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/d7aaf0fa4a54429c88210a4774bedfd0 2024-12-12T16:28:27,251 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/10b0d8cb9ae749f49fa2f95d4124356e to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/10b0d8cb9ae749f49fa2f95d4124356e 2024-12-12T16:28:27,251 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/b521c03e22fc4171a62e3263bfb7b757 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/b521c03e22fc4171a62e3263bfb7b757 2024-12-12T16:28:27,252 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/aaf44d78aa4f429c9d42bf21def57e38 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/aaf44d78aa4f429c9d42bf21def57e38 2024-12-12T16:28:27,252 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/ef8f17ba8d994921be3acb177715697e to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/ef8f17ba8d994921be3acb177715697e 2024-12-12T16:28:27,252 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/82786d3ec8704ba0b92eff66192cf911 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/82786d3ec8704ba0b92eff66192cf911 2024-12-12T16:28:27,252 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/927324f133744e73985e7d5e8f196cd3 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/927324f133744e73985e7d5e8f196cd3 2024-12-12T16:28:27,253 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/5005bb446a4a460ea3af5fbf2d93fec1 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/5005bb446a4a460ea3af5fbf2d93fec1 2024-12-12T16:28:27,253 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/6817c5899c7b47039d86ad9726729e66 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/6817c5899c7b47039d86ad9726729e66 2024-12-12T16:28:27,253 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/e49356ac40364677969dbc411eed004b to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/e49356ac40364677969dbc411eed004b 2024-12-12T16:28:27,253 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/5f98dee9aa8248b5ad449b0cf891e172 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/5f98dee9aa8248b5ad449b0cf891e172 2024-12-12T16:28:27,255 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/4cdd70df7896414f90e504d1e53e03d8 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/4cdd70df7896414f90e504d1e53e03d8 2024-12-12T16:28:27,255 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/2dc397eb5094484d86723a4873fd1782 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/2dc397eb5094484d86723a4873fd1782 2024-12-12T16:28:27,255 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/e9c9ecd3dcfe4233b83f9e674304a7c9 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/e9c9ecd3dcfe4233b83f9e674304a7c9 2024-12-12T16:28:27,255 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/0a4910d10d2a49bcb12566ac6918189e to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/0a4910d10d2a49bcb12566ac6918189e 2024-12-12T16:28:27,255 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/409d902004b0483c96ed218ff4512220 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/409d902004b0483c96ed218ff4512220 2024-12-12T16:28:27,255 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/f3fe504acb5c45d69774ada363464527 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/f3fe504acb5c45d69774ada363464527 2024-12-12T16:28:27,255 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/447a1f5bdd634757a17970ec64f514e9 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/447a1f5bdd634757a17970ec64f514e9 2024-12-12T16:28:27,256 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/4c8bdd3f8cdf4c93be09074430b74658 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/4c8bdd3f8cdf4c93be09074430b74658 2024-12-12T16:28:27,256 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/4878dd8a5c68414aa803593ce9e302b3 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/4878dd8a5c68414aa803593ce9e302b3 2024-12-12T16:28:27,260 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/recovered.edits/448.seqid, newMaxSeqId=448, maxSeqId=1 2024-12-12T16:28:27,261 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46. 
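The close path traced above moves each compacted store file from the region's data directory to the mirrored location under archive/. Below is a minimal, illustrative Java sketch of that data-to-archive path mapping using the plain Hadoop FileSystem API; the helper name and hard-coded paths are assumptions for illustration only, and HBase's real backup.HFileArchiver additionally handles name collisions, retries, and bulk deletes.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/** Illustrative only: mirrors one store file from data/ to archive/ as the log entries above describe. */
public class ArchiveSketch {
  // Hypothetical helper; not HBase's HFileArchiver.
  static Path toArchivePath(Path rootDir, Path storeFile) {
    // e.g. <root>/data/default/TestAcidGuarantees/<region>/B/<hfile>
    //   -> <root>/archive/data/default/TestAcidGuarantees/<region>/B/<hfile>
    String relative = storeFile.toUri().getPath()
        .substring(rootDir.toUri().getPath().length() + 1); // strip "<root>/"
    return new Path(new Path(rootDir, "archive"), relative);
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    Path rootDir = new Path("hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc");
    Path storeFile = new Path(rootDir,
        "data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/0fc3ed8d1ada478cb4ee43df814a8d8c");
    FileSystem fs = rootDir.getFileSystem(conf);
    Path archived = toArchivePath(rootDir, storeFile);
    fs.mkdirs(archived.getParent());                 // create the mirrored archive directory
    boolean moved = fs.rename(storeFile, archived);  // move, do not copy
    System.out.println("Archived=" + moved + " to " + archived);
  }
}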
2024-12-12T16:28:27,261 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1635): Region close journal for d67e4a55402e756819e4e8a994aa3c46: 2024-12-12T16:28:27,263 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(170): Closed d67e4a55402e756819e4e8a994aa3c46 2024-12-12T16:28:27,263 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=93 updating hbase:meta row=d67e4a55402e756819e4e8a994aa3c46, regionState=CLOSED 2024-12-12T16:28:27,265 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=94, resume processing ppid=93 2024-12-12T16:28:27,265 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, ppid=93, state=SUCCESS; CloseRegionProcedure d67e4a55402e756819e4e8a994aa3c46, server=4f6a4780a2f6,41933,1734020809476 in 1.2790 sec 2024-12-12T16:28:27,266 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=93, resume processing ppid=92 2024-12-12T16:28:27,267 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, ppid=92, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=d67e4a55402e756819e4e8a994aa3c46, UNASSIGN in 1.2820 sec 2024-12-12T16:28:27,268 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=92, resume processing ppid=91 2024-12-12T16:28:27,268 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, ppid=91, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.2860 sec 2024-12-12T16:28:27,269 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734020907269"}]},"ts":"1734020907269"} 2024-12-12T16:28:27,270 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-12T16:28:27,272 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-12T16:28:27,273 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.2990 sec 2024-12-12T16:28:28,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-12T16:28:28,081 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 91 completed 2024-12-12T16:28:28,081 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-12T16:28:28,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=95, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T16:28:28,083 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=95, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T16:28:28,083 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=95, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T16:28:28,084 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-12-12T16:28:28,085 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46 2024-12-12T16:28:28,087 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A, FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B, FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C, FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/recovered.edits] 2024-12-12T16:28:28,090 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/8422a16816bb448cbbde724f8cd37b0e to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/8422a16816bb448cbbde724f8cd37b0e 2024-12-12T16:28:28,090 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/0e9b1368d8df40429edc3f39236de2d8 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/A/0e9b1368d8df40429edc3f39236de2d8 2024-12-12T16:28:28,093 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/a33259af5fa24bc89541d1041af72c1c to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/a33259af5fa24bc89541d1041af72c1c 2024-12-12T16:28:28,093 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/ba42f27311634018921728c6d2a0a66c to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/B/ba42f27311634018921728c6d2a0a66c 2024-12-12T16:28:28,095 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/25755eafcd15492bab9372061f01fc41 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/25755eafcd15492bab9372061f01fc41 
2024-12-12T16:28:28,095 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/6c20a52b6d6347ebae55d975439ad38b to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/C/6c20a52b6d6347ebae55d975439ad38b 2024-12-12T16:28:28,098 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/recovered.edits/448.seqid to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46/recovered.edits/448.seqid 2024-12-12T16:28:28,098 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/d67e4a55402e756819e4e8a994aa3c46 2024-12-12T16:28:28,098 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-12T16:28:28,100 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=95, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T16:28:28,103 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-12T16:28:28,105 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-12T16:28:28,106 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=95, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T16:28:28,106 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 2024-12-12T16:28:28,106 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734020908106"}]},"ts":"9223372036854775807"} 2024-12-12T16:28:28,108 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-12T16:28:28,108 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => d67e4a55402e756819e4e8a994aa3c46, NAME => 'TestAcidGuarantees,,1734020878111.d67e4a55402e756819e4e8a994aa3c46.', STARTKEY => '', ENDKEY => ''}] 2024-12-12T16:28:28,108 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 
2024-12-12T16:28:28,108 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734020908108"}]},"ts":"9223372036854775807"} 2024-12-12T16:28:28,109 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-12T16:28:28,112 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=95, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T16:28:28,113 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 31 msec 2024-12-12T16:28:28,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-12-12T16:28:28,185 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 95 completed 2024-12-12T16:28:28,194 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=244 (was 247), OpenFileDescriptor=450 (was 459), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=359 (was 364), ProcessCount=11 (was 11), AvailableMemoryMB=7730 (was 7807) 2024-12-12T16:28:28,203 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=244, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=359, ProcessCount=11, AvailableMemoryMB=7730 2024-12-12T16:28:28,204 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
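The DisableTableProcedure (pid=91) and DeleteTableProcedure (pid=95) completed above are driven from the client side through the Admin API. A minimal sketch of that client sequence follows, assuming a standard HBase 2.x Connection/Admin; the blocking calls poll the master for procedure completion, which is the repeated "Checking to see if procedure is done" line in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // picks up hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      if (admin.tableExists(table)) {
        if (admin.isTableEnabled(table)) {
          admin.disableTable(table); // DisableTableProcedure (pid=91 above)
        }
        admin.deleteTable(table);    // DeleteTableProcedure (pid=95 above); region dirs are archived
      }
    }
  }
}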
2024-12-12T16:28:28,205 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T16:28:28,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=96, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-12T16:28:28,206 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T16:28:28,207 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:28,207 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 96 2024-12-12T16:28:28,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-12-12T16:28:28,207 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T16:28:28,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742178_1354 (size=963) 2024-12-12T16:28:28,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-12-12T16:28:28,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-12-12T16:28:28,615 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc 2024-12-12T16:28:28,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742179_1355 (size=53) 2024-12-12T16:28:28,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-12-12T16:28:29,021 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T16:28:29,021 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 53789b78ac3c456afdd9ca3b09fff4b9, disabling compactions & flushes 2024-12-12T16:28:29,021 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:29,021 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:29,021 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. after waiting 0 ms 2024-12-12T16:28:29,021 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:29,021 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 
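The CREATE request above builds a descriptor with the table-level metadata 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' and three single-version families A, B and C. A hedged sketch of an equivalent descriptor with the HBase 2.x builder API is below; only the attributes visible in the log are set and everything else is left at defaults.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  static ColumnFamilyDescriptor family(String name) {
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
        .setMaxVersions(1) // VERSIONS => '1' in the descriptor above
        .build();          // remaining attributes (BLOOMFILTER, BLOCKSIZE, ...) stay at defaults
  }

  static TableDescriptor descriptor() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
        // table-level metadata shown in the log; selects the ADAPTIVE CompactingMemStore
        .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE")
        .setColumnFamily(family("A"))
        .setColumnFamily(family("B"))
        .setColumnFamily(family("C"))
        .build();
  }

  static void create(Admin admin) throws java.io.IOException {
    admin.createTable(descriptor()); // CreateTableProcedure (pid=96 above)
  }
}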
2024-12-12T16:28:29,021 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 53789b78ac3c456afdd9ca3b09fff4b9: 2024-12-12T16:28:29,022 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T16:28:29,023 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1734020909022"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734020909022"}]},"ts":"1734020909022"} 2024-12-12T16:28:29,024 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-12T16:28:29,024 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T16:28:29,025 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734020909024"}]},"ts":"1734020909024"} 2024-12-12T16:28:29,025 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-12T16:28:29,029 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=53789b78ac3c456afdd9ca3b09fff4b9, ASSIGN}] 2024-12-12T16:28:29,030 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=53789b78ac3c456afdd9ca3b09fff4b9, ASSIGN 2024-12-12T16:28:29,031 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=53789b78ac3c456afdd9ca3b09fff4b9, ASSIGN; state=OFFLINE, location=4f6a4780a2f6,41933,1734020809476; forceNewPlan=false, retain=false 2024-12-12T16:28:29,181 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=97 updating hbase:meta row=53789b78ac3c456afdd9ca3b09fff4b9, regionState=OPENING, regionLocation=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:29,182 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=98, ppid=97, state=RUNNABLE; OpenRegionProcedure 53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476}] 2024-12-12T16:28:29,216 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-12T16:28:29,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-12-12T16:28:29,333 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:29,336 INFO [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] handler.AssignRegionHandler(135): 
Open TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:29,336 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(7285): Opening region: {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} 2024-12-12T16:28:29,336 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:28:29,336 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T16:28:29,336 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(7327): checking encryption for 53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:28:29,336 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(7330): checking classloading for 53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:28:29,337 INFO [StoreOpener-53789b78ac3c456afdd9ca3b09fff4b9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:28:29,339 INFO [StoreOpener-53789b78ac3c456afdd9ca3b09fff4b9-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T16:28:29,339 INFO [StoreOpener-53789b78ac3c456afdd9ca3b09fff4b9-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 53789b78ac3c456afdd9ca3b09fff4b9 columnFamilyName A 2024-12-12T16:28:29,339 DEBUG [StoreOpener-53789b78ac3c456afdd9ca3b09fff4b9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:29,339 INFO [StoreOpener-53789b78ac3c456afdd9ca3b09fff4b9-1 {}] regionserver.HStore(327): Store=53789b78ac3c456afdd9ca3b09fff4b9/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T16:28:29,339 INFO [StoreOpener-53789b78ac3c456afdd9ca3b09fff4b9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, 
cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:28:29,340 INFO [StoreOpener-53789b78ac3c456afdd9ca3b09fff4b9-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T16:28:29,340 INFO [StoreOpener-53789b78ac3c456afdd9ca3b09fff4b9-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 53789b78ac3c456afdd9ca3b09fff4b9 columnFamilyName B 2024-12-12T16:28:29,340 DEBUG [StoreOpener-53789b78ac3c456afdd9ca3b09fff4b9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:29,341 INFO [StoreOpener-53789b78ac3c456afdd9ca3b09fff4b9-1 {}] regionserver.HStore(327): Store=53789b78ac3c456afdd9ca3b09fff4b9/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T16:28:29,341 INFO [StoreOpener-53789b78ac3c456afdd9ca3b09fff4b9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:28:29,341 INFO [StoreOpener-53789b78ac3c456afdd9ca3b09fff4b9-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T16:28:29,341 INFO [StoreOpener-53789b78ac3c456afdd9ca3b09fff4b9-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 53789b78ac3c456afdd9ca3b09fff4b9 columnFamilyName C 2024-12-12T16:28:29,341 DEBUG [StoreOpener-53789b78ac3c456afdd9ca3b09fff4b9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:29,342 INFO [StoreOpener-53789b78ac3c456afdd9ca3b09fff4b9-1 {}] 
regionserver.HStore(327): Store=53789b78ac3c456afdd9ca3b09fff4b9/C, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T16:28:29,342 INFO [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:29,342 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:28:29,343 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:28:29,343 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-12T16:28:29,344 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1085): writing seq id for 53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:28:29,346 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T16:28:29,346 INFO [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1102): Opened 53789b78ac3c456afdd9ca3b09fff4b9; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68026997, jitterRate=0.01368124783039093}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-12T16:28:29,347 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1001): Region open journal for 53789b78ac3c456afdd9ca3b09fff4b9: 2024-12-12T16:28:29,347 INFO [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9., pid=98, masterSystemTime=1734020909333 2024-12-12T16:28:29,349 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:29,349 INFO [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 
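Each store opened above is a CompactingMemStore with compactor=ADAPTIVE, which the test reaches through the table-level metadata key. As a sketch only: HBase 2.x also exposes the same switch per column family; the setter and enum names below are assumed from the 2.x client API and should be verified against the version in use.

import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class InMemoryCompactionSketch {
  // Assumption: setInMemoryCompaction/MemoryCompactionPolicy are the per-family equivalents of
  // the table-level 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' metadata in the log.
  static ColumnFamilyDescriptor adaptiveFamily(String name) {
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
        .setMaxVersions(1)
        .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE) // CompactingMemStore, compactor=ADAPTIVE
        .build();
  }
}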
2024-12-12T16:28:29,350 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=97 updating hbase:meta row=53789b78ac3c456afdd9ca3b09fff4b9, regionState=OPEN, openSeqNum=2, regionLocation=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:29,352 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=98, resume processing ppid=97 2024-12-12T16:28:29,352 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, ppid=97, state=SUCCESS; OpenRegionProcedure 53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 in 169 msec 2024-12-12T16:28:29,353 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=97, resume processing ppid=96 2024-12-12T16:28:29,353 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, ppid=96, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=53789b78ac3c456afdd9ca3b09fff4b9, ASSIGN in 323 msec 2024-12-12T16:28:29,354 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T16:28:29,354 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734020909354"}]},"ts":"1734020909354"} 2024-12-12T16:28:29,355 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-12T16:28:29,357 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T16:28:29,358 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1520 sec 2024-12-12T16:28:30,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-12-12T16:28:30,311 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 96 completed 2024-12-12T16:28:30,313 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7b6cf8cb to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@72f422b4 2024-12-12T16:28:30,316 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1dc42ea6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:28:30,317 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:28:30,319 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59076, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:28:30,319 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-12T16:28:30,320 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34200, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-12T16:28:30,322 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-12T16:28:30,322 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T16:28:30,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=99, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-12T16:28:30,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742180_1356 (size=999) 2024-12-12T16:28:30,733 DEBUG [PEWorker-4 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-12T16:28:30,733 INFO [PEWorker-4 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-12T16:28:30,734 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=100, ppid=99, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-12T16:28:30,736 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=101, ppid=100, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=53789b78ac3c456afdd9ca3b09fff4b9, REOPEN/MOVE}] 2024-12-12T16:28:30,737 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=101, ppid=100, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=53789b78ac3c456afdd9ca3b09fff4b9, REOPEN/MOVE 2024-12-12T16:28:30,737 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=53789b78ac3c456afdd9ca3b09fff4b9, regionState=CLOSING, regionLocation=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:30,738 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T16:28:30,738 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=102, ppid=101, state=RUNNABLE; CloseRegionProcedure 53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476}] 2024-12-12T16:28:30,889 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:30,890 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] handler.UnassignRegionHandler(124): Close 53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:28:30,890 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T16:28:30,890 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1681): Closing 53789b78ac3c456afdd9ca3b09fff4b9, disabling compactions & flushes 2024-12-12T16:28:30,890 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:30,890 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:30,890 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. after waiting 0 ms 2024-12-12T16:28:30,890 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 
2024-12-12T16:28:30,894 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-12T16:28:30,894 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:30,894 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1635): Region close journal for 53789b78ac3c456afdd9ca3b09fff4b9: 2024-12-12T16:28:30,894 WARN [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegionServer(3786): Not adding moved region record: 53789b78ac3c456afdd9ca3b09fff4b9 to self. 2024-12-12T16:28:30,896 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] handler.UnassignRegionHandler(170): Closed 53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:28:30,896 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=53789b78ac3c456afdd9ca3b09fff4b9, regionState=CLOSED 2024-12-12T16:28:30,898 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=102, resume processing ppid=101 2024-12-12T16:28:30,898 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, ppid=101, state=SUCCESS; CloseRegionProcedure 53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 in 159 msec 2024-12-12T16:28:30,898 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=101, ppid=100, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=53789b78ac3c456afdd9ca3b09fff4b9, REOPEN/MOVE; state=CLOSED, location=4f6a4780a2f6,41933,1734020809476; forceNewPlan=false, retain=true 2024-12-12T16:28:31,049 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=53789b78ac3c456afdd9ca3b09fff4b9, regionState=OPENING, regionLocation=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:31,050 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=103, ppid=101, state=RUNNABLE; OpenRegionProcedure 53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476}] 2024-12-12T16:28:31,201 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:31,204 INFO [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 
2024-12-12T16:28:31,204 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7285): Opening region: {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} 2024-12-12T16:28:31,205 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:28:31,205 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T16:28:31,205 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7327): checking encryption for 53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:28:31,205 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7330): checking classloading for 53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:28:31,206 INFO [StoreOpener-53789b78ac3c456afdd9ca3b09fff4b9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:28:31,207 INFO [StoreOpener-53789b78ac3c456afdd9ca3b09fff4b9-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T16:28:31,207 INFO [StoreOpener-53789b78ac3c456afdd9ca3b09fff4b9-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 53789b78ac3c456afdd9ca3b09fff4b9 columnFamilyName A 2024-12-12T16:28:31,208 DEBUG [StoreOpener-53789b78ac3c456afdd9ca3b09fff4b9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:31,208 INFO [StoreOpener-53789b78ac3c456afdd9ca3b09fff4b9-1 {}] regionserver.HStore(327): Store=53789b78ac3c456afdd9ca3b09fff4b9/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T16:28:31,209 INFO [StoreOpener-53789b78ac3c456afdd9ca3b09fff4b9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:28:31,209 INFO [StoreOpener-53789b78ac3c456afdd9ca3b09fff4b9-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T16:28:31,209 INFO [StoreOpener-53789b78ac3c456afdd9ca3b09fff4b9-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 53789b78ac3c456afdd9ca3b09fff4b9 columnFamilyName B 2024-12-12T16:28:31,209 DEBUG [StoreOpener-53789b78ac3c456afdd9ca3b09fff4b9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:31,210 INFO [StoreOpener-53789b78ac3c456afdd9ca3b09fff4b9-1 {}] regionserver.HStore(327): Store=53789b78ac3c456afdd9ca3b09fff4b9/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T16:28:31,210 INFO [StoreOpener-53789b78ac3c456afdd9ca3b09fff4b9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:28:31,210 INFO [StoreOpener-53789b78ac3c456afdd9ca3b09fff4b9-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T16:28:31,210 INFO [StoreOpener-53789b78ac3c456afdd9ca3b09fff4b9-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 53789b78ac3c456afdd9ca3b09fff4b9 columnFamilyName C 2024-12-12T16:28:31,210 DEBUG [StoreOpener-53789b78ac3c456afdd9ca3b09fff4b9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:31,211 INFO [StoreOpener-53789b78ac3c456afdd9ca3b09fff4b9-1 {}] regionserver.HStore(327): Store=53789b78ac3c456afdd9ca3b09fff4b9/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T16:28:31,211 INFO [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:31,211 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:28:31,212 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:28:31,213 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-12T16:28:31,214 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1085): writing seq id for 53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:28:31,215 INFO [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1102): Opened 53789b78ac3c456afdd9ca3b09fff4b9; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60513271, jitterRate=-0.09828199446201324}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-12T16:28:31,215 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1001): Region open journal for 53789b78ac3c456afdd9ca3b09fff4b9: 2024-12-12T16:28:31,216 INFO [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9., pid=103, masterSystemTime=1734020911201 2024-12-12T16:28:31,218 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:31,218 INFO [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 
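
The reopen above completes ModifyTableProcedure pid=99, which rewrote the TestAcidGuarantees descriptor so that column family A is stored as MOB (IS_MOB => 'true', MOB_THRESHOLD => '4') while keeping the ADAPTIVE compacting memstore. A minimal sketch of issuing an equivalent descriptor change through the HBase 2.x Admin API follows; the connection setup and the class name are illustrative assumptions, not code taken from this test run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    // Illustrative sketch: switch family 'A' of TestAcidGuarantees to MOB storage
    // with a 4-byte threshold, mirroring the descriptor diff logged by the master.
    public class EnableMobOnFamilyA {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          TableDescriptor current = admin.getDescriptor(table);
          ColumnFamilyDescriptor a = current.getColumnFamily(Bytes.toBytes("A"));
          ColumnFamilyDescriptor mobA = ColumnFamilyDescriptorBuilder.newBuilder(a)
              .setMobEnabled(true)   // IS_MOB => 'true'
              .setMobThreshold(4L)   // MOB_THRESHOLD => '4'
              .build();
          TableDescriptor modified = TableDescriptorBuilder.newBuilder(current)
              .modifyColumnFamily(mobA)
              .build();
          // modifyTable triggers a ModifyTableProcedure plus a ReopenTableRegionsProcedure,
          // matching the pid=99 / pid=100 sequence recorded in this log.
          admin.modifyTable(modified);
        }
      }
    }

With the 4-byte threshold in place, values written to family A that exceed the threshold are moved to MOB files, which is why the flush further below writes into the region's mobdir/.tmp directory.
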
2024-12-12T16:28:31,218 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=53789b78ac3c456afdd9ca3b09fff4b9, regionState=OPEN, openSeqNum=5, regionLocation=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:31,220 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=103, resume processing ppid=101 2024-12-12T16:28:31,220 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, ppid=101, state=SUCCESS; OpenRegionProcedure 53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 in 169 msec 2024-12-12T16:28:31,221 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=101, resume processing ppid=100 2024-12-12T16:28:31,221 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, ppid=100, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=53789b78ac3c456afdd9ca3b09fff4b9, REOPEN/MOVE in 484 msec 2024-12-12T16:28:31,222 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=100, resume processing ppid=99 2024-12-12T16:28:31,222 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, ppid=99, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 487 msec 2024-12-12T16:28:31,224 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 901 msec 2024-12-12T16:28:31,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-12-12T16:28:31,226 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7ec15031 to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2df33cdf 2024-12-12T16:28:31,232 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@117e86d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:28:31,233 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3dd5b441 to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@9f472e0 2024-12-12T16:28:31,236 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6cd96549, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:28:31,237 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3c336ea4 to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@167a78b0 2024-12-12T16:28:31,239 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@31aea41b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:28:31,240 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1f94d721 to 
127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5aee939b 2024-12-12T16:28:31,243 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e247aa1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:28:31,244 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x319559be to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1f49665c 2024-12-12T16:28:31,246 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2205f666, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:28:31,247 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x683f8469 to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6584e9ce 2024-12-12T16:28:31,251 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e3203d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:28:31,251 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x75e4d3d0 to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@37ec8e3b 2024-12-12T16:28:31,254 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@798e7fd4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:28:31,255 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2b308f62 to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@787e5169 2024-12-12T16:28:31,257 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7284f16d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:28:31,258 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x68035c67 to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@627cad17 2024-12-12T16:28:31,260 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37a637ac, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:28:31,261 DEBUG [Time-limited test 
{}] zookeeper.ReadOnlyZKClient(149): Connect 0x3eab689a to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@39387e4d 2024-12-12T16:28:31,264 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3fa53591, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:28:31,266 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T16:28:31,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees 2024-12-12T16:28:31,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-12T16:28:31,268 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T16:28:31,269 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T16:28:31,269 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T16:28:31,269 DEBUG [hconnection-0x2ac12617-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:28:31,269 DEBUG [hconnection-0xc05cf53-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:28:31,270 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59094, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:28:31,270 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59092, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:28:31,272 DEBUG [hconnection-0x3273eb9a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:28:31,272 DEBUG [hconnection-0xcf0b5ce-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:28:31,272 DEBUG [hconnection-0x233e44e0-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:28:31,273 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59106, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:28:31,273 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59122, version=2.7.0-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:28:31,273 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59138, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:28:31,274 DEBUG [hconnection-0xb374033-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:28:31,274 DEBUG [hconnection-0x7aa9ea6d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:28:31,274 DEBUG [hconnection-0x30f324ff-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:28:31,275 DEBUG [hconnection-0x5de1cede-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:28:31,275 DEBUG [hconnection-0x79f77565-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:28:31,275 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59154, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:28:31,276 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59156, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:28:31,277 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59166, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:28:31,277 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59190, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:28:31,277 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59174, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:28:31,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:28:31,282 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 53789b78ac3c456afdd9ca3b09fff4b9 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T16:28:31,282 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53789b78ac3c456afdd9ca3b09fff4b9, store=A 2024-12-12T16:28:31,282 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:31,282 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53789b78ac3c456afdd9ca3b09fff4b9, store=B 2024-12-12T16:28:31,282 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:31,282 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53789b78ac3c456afdd9ca3b09fff4b9, store=C 2024-12-12T16:28:31,283 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:31,305 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412123adb7f35931b4166921e091bc587134e_53789b78ac3c456afdd9ca3b09fff4b9 is 50, key is test_row_0/A:col10/1734020911281/Put/seqid=0 2024-12-12T16:28:31,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742181_1357 (size=12154) 2024-12-12T16:28:31,334 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:31,334 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:31,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020971327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:31,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59174 deadline: 1734020971327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:31,334 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:31,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020971333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:31,339 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:31,339 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:31,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020971334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:31,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020971334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:31,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-12T16:28:31,420 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:31,421 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-12T16:28:31,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:31,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. as already flushing 2024-12-12T16:28:31,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:31,421 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:31,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:31,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:31,437 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:31,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020971435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:31,437 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:31,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020971435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:31,437 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:31,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59174 deadline: 1734020971435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:31,442 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:31,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020971441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:31,442 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:31,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020971442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:31,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-12T16:28:31,574 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:31,574 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-12T16:28:31,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:31,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. as already flushing 2024-12-12T16:28:31,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:31,574 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:28:31,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:31,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:31,643 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:31,643 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:31,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59174 deadline: 1734020971639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:31,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020971639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:31,643 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:31,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020971639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:31,644 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:31,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020971643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:31,648 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:31,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020971644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:31,712 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:31,716 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412123adb7f35931b4166921e091bc587134e_53789b78ac3c456afdd9ca3b09fff4b9 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412123adb7f35931b4166921e091bc587134e_53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:28:31,717 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/571dc34ce88f4262b8c75d180ab4eb0d, store: [table=TestAcidGuarantees family=A region=53789b78ac3c456afdd9ca3b09fff4b9] 2024-12-12T16:28:31,717 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/571dc34ce88f4262b8c75d180ab4eb0d is 175, key is test_row_0/A:col10/1734020911281/Put/seqid=0 2024-12-12T16:28:31,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742182_1358 (size=30955) 2024-12-12T16:28:31,726 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:31,726 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-12T16:28:31,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:31,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 
as already flushing 2024-12-12T16:28:31,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:31,727 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:31,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:31,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:31,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-12T16:28:31,878 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:31,879 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-12T16:28:31,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:31,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. as already flushing 2024-12-12T16:28:31,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:31,879 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:31,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:31,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:31,948 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:31,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020971945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:31,948 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:31,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020971945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:31,948 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:31,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59174 deadline: 1734020971945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:31,949 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:31,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020971945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:31,953 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:31,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020971949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:32,031 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:32,032 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-12T16:28:32,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:32,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. as already flushing 2024-12-12T16:28:32,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:32,032 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:32,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:32,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:32,122 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=15, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/571dc34ce88f4262b8c75d180ab4eb0d 2024-12-12T16:28:32,145 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/cc060933e4894e4a9fd02eb9f1a3b124 is 50, key is test_row_0/B:col10/1734020911281/Put/seqid=0 2024-12-12T16:28:32,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742183_1359 (size=12001) 2024-12-12T16:28:32,150 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/cc060933e4894e4a9fd02eb9f1a3b124 2024-12-12T16:28:32,179 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/fb36588fe9374dec920e6baf9983c044 is 50, key is test_row_0/C:col10/1734020911281/Put/seqid=0 2024-12-12T16:28:32,185 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:32,185 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-12T16:28:32,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:32,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. as already flushing 2024-12-12T16:28:32,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:32,185 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:32,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:28:32,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:28:32,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742184_1360 (size=12001) 2024-12-12T16:28:32,198 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/fb36588fe9374dec920e6baf9983c044 2024-12-12T16:28:32,203 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/571dc34ce88f4262b8c75d180ab4eb0d as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/571dc34ce88f4262b8c75d180ab4eb0d 2024-12-12T16:28:32,208 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/571dc34ce88f4262b8c75d180ab4eb0d, entries=150, sequenceid=15, filesize=30.2 K 2024-12-12T16:28:32,210 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/cc060933e4894e4a9fd02eb9f1a3b124 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/cc060933e4894e4a9fd02eb9f1a3b124 2024-12-12T16:28:32,213 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/cc060933e4894e4a9fd02eb9f1a3b124, entries=150, sequenceid=15, filesize=11.7 K 2024-12-12T16:28:32,214 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/fb36588fe9374dec920e6baf9983c044 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/fb36588fe9374dec920e6baf9983c044 2024-12-12T16:28:32,219 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/fb36588fe9374dec920e6baf9983c044, entries=150, sequenceid=15, filesize=11.7 K 2024-12-12T16:28:32,220 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 53789b78ac3c456afdd9ca3b09fff4b9 in 938ms, sequenceid=15, compaction requested=false 2024-12-12T16:28:32,220 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-12T16:28:32,221 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 53789b78ac3c456afdd9ca3b09fff4b9: 2024-12-12T16:28:32,338 DEBUG 
[RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:32,338 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-12T16:28:32,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:32,338 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2837): Flushing 53789b78ac3c456afdd9ca3b09fff4b9 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-12T16:28:32,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53789b78ac3c456afdd9ca3b09fff4b9, store=A 2024-12-12T16:28:32,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:32,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53789b78ac3c456afdd9ca3b09fff4b9, store=B 2024-12-12T16:28:32,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:32,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53789b78ac3c456afdd9ca3b09fff4b9, store=C 2024-12-12T16:28:32,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:32,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212c41be29d9981440895600b6ce03d2d89_53789b78ac3c456afdd9ca3b09fff4b9 is 50, key is test_row_0/A:col10/1734020911332/Put/seqid=0 2024-12-12T16:28:32,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-12T16:28:32,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742185_1361 (size=12154) 2024-12-12T16:28:32,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:32,383 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212c41be29d9981440895600b6ce03d2d89_53789b78ac3c456afdd9ca3b09fff4b9 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212c41be29d9981440895600b6ce03d2d89_53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:28:32,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/ba80a12c0b8f49bdbe5f3e72912b37d3, store: [table=TestAcidGuarantees family=A region=53789b78ac3c456afdd9ca3b09fff4b9] 2024-12-12T16:28:32,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/ba80a12c0b8f49bdbe5f3e72912b37d3 is 175, key is test_row_0/A:col10/1734020911332/Put/seqid=0 2024-12-12T16:28:32,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742186_1362 (size=30955) 2024-12-12T16:28:32,408 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=40, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/ba80a12c0b8f49bdbe5f3e72912b37d3 2024-12-12T16:28:32,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/ee56b8e7d56946dbbe6dd5d1237f8e97 is 50, key is test_row_0/B:col10/1734020911332/Put/seqid=0 2024-12-12T16:28:32,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742187_1363 (size=12001) 2024-12-12T16:28:32,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:28:32,453 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. as already flushing 2024-12-12T16:28:32,464 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:32,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59174 deadline: 1734020972459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:32,464 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:32,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020972459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:32,465 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:32,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020972460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:32,468 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:32,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020972464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:32,468 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:32,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020972464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:32,567 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:32,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59174 deadline: 1734020972565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:32,567 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:32,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020972565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:32,568 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:32,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020972566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:32,572 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:32,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020972569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:32,573 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:32,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020972569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:32,770 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:32,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020972768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:32,771 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:32,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020972769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:32,771 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:32,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59174 deadline: 1734020972769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:32,775 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:32,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020972774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:32,776 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:32,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020972774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:32,792 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-12T16:28:32,821 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/ee56b8e7d56946dbbe6dd5d1237f8e97 2024-12-12T16:28:32,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/75b0c29b87124b04944401002787db7b is 50, key is test_row_0/C:col10/1734020911332/Put/seqid=0 2024-12-12T16:28:32,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742188_1364 (size=12001) 2024-12-12T16:28:33,075 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:33,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020973073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:33,075 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:33,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020973073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:33,075 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:33,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59174 deadline: 1734020973073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:33,080 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:33,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020973077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:33,081 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:33,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020973077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:33,246 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/75b0c29b87124b04944401002787db7b 2024-12-12T16:28:33,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/ba80a12c0b8f49bdbe5f3e72912b37d3 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/ba80a12c0b8f49bdbe5f3e72912b37d3 2024-12-12T16:28:33,258 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/ba80a12c0b8f49bdbe5f3e72912b37d3, entries=150, sequenceid=40, filesize=30.2 K 2024-12-12T16:28:33,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/ee56b8e7d56946dbbe6dd5d1237f8e97 as 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/ee56b8e7d56946dbbe6dd5d1237f8e97 2024-12-12T16:28:33,262 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/ee56b8e7d56946dbbe6dd5d1237f8e97, entries=150, sequenceid=40, filesize=11.7 K 2024-12-12T16:28:33,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/75b0c29b87124b04944401002787db7b as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/75b0c29b87124b04944401002787db7b 2024-12-12T16:28:33,267 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/75b0c29b87124b04944401002787db7b, entries=150, sequenceid=40, filesize=11.7 K 2024-12-12T16:28:33,267 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 53789b78ac3c456afdd9ca3b09fff4b9 in 929ms, sequenceid=40, compaction requested=false 2024-12-12T16:28:33,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2538): Flush status journal for 53789b78ac3c456afdd9ca3b09fff4b9: 2024-12-12T16:28:33,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 
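The run of RegionTooBusyException warnings above comes from client mutations arriving while the region's memstore is over its blocking limit; the server rejects each put and the writer is expected to back off and retry. The following writer-side sketch is hypothetical: it assumes the exception reaches the caller directly (in practice the HBase client's own retry layer may absorb it and only surface a retries-exhausted error). The row, family, and qualifier mirror the keys seen in this log; the value and backoff policy are illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffWriter {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;
          while (true) {
            try {
              table.put(put);
              break;
            } catch (RegionTooBusyException e) {
              // Memstore is above its blocking limit; wait for the flush to drain it.
              Thread.sleep(backoffMs);
              backoffMs = Math.min(backoffMs * 2, 5_000); // exponential backoff, capped
            }
          }
        }
      }
    }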
2024-12-12T16:28:33,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105 2024-12-12T16:28:33,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4106): Remote procedure done, pid=105 2024-12-12T16:28:33,270 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=105, resume processing ppid=104 2024-12-12T16:28:33,270 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, ppid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0000 sec 2024-12-12T16:28:33,271 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees in 2.0040 sec 2024-12-12T16:28:33,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-12T16:28:33,372 INFO [Thread-1616 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 104 completed 2024-12-12T16:28:33,373 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T16:28:33,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees 2024-12-12T16:28:33,375 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T16:28:33,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-12T16:28:33,376 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T16:28:33,376 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=107, ppid=106, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T16:28:33,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-12T16:28:33,527 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:33,528 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-12T16:28:33,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 
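The 512.0 K figure in these warnings is the per-region blocking threshold, which HBase derives from hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The sketch below assumes a 128 KB flush size with the default multiplier of 4, which would yield that limit; the values actually configured by this test are not visible in the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitConfig {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Flush a region's memstore once it reaches this many bytes (assumed 128 KB here).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        // Writes are rejected with RegionTooBusyException once the memstore exceeds
        // flush.size * multiplier, i.e. 4 * 128 KB = 512 KB with these assumed values.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("Blocking memstore limit (bytes): " + blockingLimit);
      }
    }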
2024-12-12T16:28:33,528 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2837): Flushing 53789b78ac3c456afdd9ca3b09fff4b9 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T16:28:33,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53789b78ac3c456afdd9ca3b09fff4b9, store=A 2024-12-12T16:28:33,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:33,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53789b78ac3c456afdd9ca3b09fff4b9, store=B 2024-12-12T16:28:33,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:33,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53789b78ac3c456afdd9ca3b09fff4b9, store=C 2024-12-12T16:28:33,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:33,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212cf808ccf5d5140e6ad272145fbbf2e51_53789b78ac3c456afdd9ca3b09fff4b9 is 50, key is test_row_0/A:col10/1734020912462/Put/seqid=0 2024-12-12T16:28:33,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742189_1365 (size=12154) 2024-12-12T16:28:33,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:28:33,579 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. as already flushing 2024-12-12T16:28:33,610 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:33,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59174 deadline: 1734020973604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:33,612 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:33,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020973605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:33,613 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:33,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020973606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:33,615 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:33,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020973609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:33,615 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:33,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020973610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:33,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-12T16:28:33,715 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:33,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59174 deadline: 1734020973712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:33,715 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:33,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020973713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:33,716 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:33,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020973714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:33,720 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:33,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020973716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:33,721 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:33,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020973716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:33,918 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:33,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020973917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:33,919 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:33,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020973917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:33,919 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:33,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59174 deadline: 1734020973917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:33,925 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:33,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020973921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:33,926 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:33,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020973923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:33,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:33,947 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212cf808ccf5d5140e6ad272145fbbf2e51_53789b78ac3c456afdd9ca3b09fff4b9 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212cf808ccf5d5140e6ad272145fbbf2e51_53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:28:33,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/a8af27310a8f44c38729f3c8101c73d7, store: [table=TestAcidGuarantees family=A region=53789b78ac3c456afdd9ca3b09fff4b9] 2024-12-12T16:28:33,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/a8af27310a8f44c38729f3c8101c73d7 is 175, key is test_row_0/A:col10/1734020912462/Put/seqid=0 2024-12-12T16:28:33,953 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742190_1366 (size=30955) 2024-12-12T16:28:33,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-12T16:28:34,223 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:34,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020974220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:34,223 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:34,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020974221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:34,225 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:34,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59174 deadline: 1734020974222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:34,230 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:34,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020974227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:34,230 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:34,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020974228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:34,354 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=51, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/a8af27310a8f44c38729f3c8101c73d7 2024-12-12T16:28:34,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/fcf822a41aca41de8be14bb5671c4a55 is 50, key is test_row_0/B:col10/1734020912462/Put/seqid=0 2024-12-12T16:28:34,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742191_1367 (size=12001) 2024-12-12T16:28:34,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-12T16:28:34,725 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:34,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020974725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:34,730 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:34,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020974727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:34,731 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:34,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59174 deadline: 1734020974728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:34,737 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:34,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020974732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:34,737 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:34,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020974732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:34,768 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/fcf822a41aca41de8be14bb5671c4a55 2024-12-12T16:28:34,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/3a2e4a472846441cb8a8a0fc90bc1a8e is 50, key is test_row_0/C:col10/1734020912462/Put/seqid=0 2024-12-12T16:28:34,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742192_1368 (size=12001) 2024-12-12T16:28:34,790 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/3a2e4a472846441cb8a8a0fc90bc1a8e 2024-12-12T16:28:34,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/a8af27310a8f44c38729f3c8101c73d7 as 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/a8af27310a8f44c38729f3c8101c73d7 2024-12-12T16:28:34,799 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/a8af27310a8f44c38729f3c8101c73d7, entries=150, sequenceid=51, filesize=30.2 K 2024-12-12T16:28:34,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/fcf822a41aca41de8be14bb5671c4a55 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/fcf822a41aca41de8be14bb5671c4a55 2024-12-12T16:28:34,803 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/fcf822a41aca41de8be14bb5671c4a55, entries=150, sequenceid=51, filesize=11.7 K 2024-12-12T16:28:34,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/3a2e4a472846441cb8a8a0fc90bc1a8e as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/3a2e4a472846441cb8a8a0fc90bc1a8e 2024-12-12T16:28:34,808 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/3a2e4a472846441cb8a8a0fc90bc1a8e, entries=150, sequenceid=51, filesize=11.7 K 2024-12-12T16:28:34,809 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 53789b78ac3c456afdd9ca3b09fff4b9 in 1280ms, sequenceid=51, compaction requested=true 2024-12-12T16:28:34,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2538): Flush status journal for 53789b78ac3c456afdd9ca3b09fff4b9: 2024-12-12T16:28:34,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 
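
Editor's note on the repeated RegionTooBusyException entries above: the server rejects writes while the region's memstore is over its blocking limit (logged here as "Over memstore limit=512.0 K") and accepts them again once the flush finishes, as it does in the records that follow. A minimal sketch of the configuration knobs that commonly govern this behaviour is shown below; the specific values are illustrative assumptions chosen to match the 512 K figure in the log, not the settings actually used by this test run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Per-region flush threshold in bytes; the blocking limit seen in the log is
    // roughly this value times the block multiplier. Values below are assumed,
    // test-sized numbers (128 KB * 4 = 512 KB), not taken from this run's config.
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    // Client-side retry behaviour when RegionTooBusyException is returned
    // (illustrative values; the test's own client settings are not in this excerpt).
    conf.setInt("hbase.client.retries.number", 15);
    conf.setLong("hbase.client.pause", 100); // pause between retries, in ms
    System.out.println("memstore flush size = "
        + conf.getLong("hbase.hregion.memstore.flush.size", -1) + " bytes");
  }
}
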
2024-12-12T16:28:34,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=107 2024-12-12T16:28:34,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4106): Remote procedure done, pid=107 2024-12-12T16:28:34,811 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=107, resume processing ppid=106 2024-12-12T16:28:34,811 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, ppid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4340 sec 2024-12-12T16:28:34,812 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees in 1.4380 sec 2024-12-12T16:28:35,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-12T16:28:35,480 INFO [Thread-1616 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 106 completed 2024-12-12T16:28:35,481 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T16:28:35,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees 2024-12-12T16:28:35,483 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T16:28:35,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-12T16:28:35,484 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T16:28:35,484 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T16:28:35,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-12T16:28:35,635 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:35,636 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-12T16:28:35,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 
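
Editor's note: the records above show a client ("Client=jenkins//172.17.0.2") requesting a table flush, which the master stores as a FlushTableProcedure (pid=108) and expands into a FlushRegionProcedure subprocedure (pid=109) dispatched to the region server. A minimal sketch of issuing the same kind of flush from client or test code, assuming a reachable cluster and the standard HBase client API, might look like the following; the table name is taken from the log, everything else is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    // Assumes cluster settings (e.g. hbase-site.xml) are available on the classpath.
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Corresponds to the "flush TestAcidGuarantees" request logged above; the
      // master runs it as a FlushTableProcedure with per-region subprocedures.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
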
2024-12-12T16:28:35,637 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2837): Flushing 53789b78ac3c456afdd9ca3b09fff4b9 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-12T16:28:35,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53789b78ac3c456afdd9ca3b09fff4b9, store=A 2024-12-12T16:28:35,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:35,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53789b78ac3c456afdd9ca3b09fff4b9, store=B 2024-12-12T16:28:35,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:35,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53789b78ac3c456afdd9ca3b09fff4b9, store=C 2024-12-12T16:28:35,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:35,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212ed7e2e53988544e7bed043fd7613b5de_53789b78ac3c456afdd9ca3b09fff4b9 is 50, key is test_row_0/A:col10/1734020913609/Put/seqid=0 2024-12-12T16:28:35,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742193_1369 (size=12154) 2024-12-12T16:28:35,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:28:35,735 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. as already flushing 2024-12-12T16:28:35,754 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:35,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59174 deadline: 1734020975748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:35,754 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:35,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020975748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:35,754 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:35,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020975749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:35,757 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:35,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020975752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:35,757 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:35,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020975753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:35,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-12T16:28:35,860 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:35,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59174 deadline: 1734020975855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:35,861 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:35,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020975855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:35,861 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:35,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020975855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:35,861 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:35,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020975858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:35,862 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:35,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020975858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:36,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:36,062 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212ed7e2e53988544e7bed043fd7613b5de_53789b78ac3c456afdd9ca3b09fff4b9 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212ed7e2e53988544e7bed043fd7613b5de_53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:28:36,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/f3d834f71cc840ae813eaf8a8db42463, store: [table=TestAcidGuarantees family=A region=53789b78ac3c456afdd9ca3b09fff4b9] 2024-12-12T16:28:36,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/f3d834f71cc840ae813eaf8a8db42463 is 175, key is test_row_0/A:col10/1734020913609/Put/seqid=0 2024-12-12T16:28:36,065 WARN 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:36,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020976063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:36,066 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:36,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020976064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:36,066 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:36,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020976064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:36,066 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:36,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59174 deadline: 1734020976064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:36,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742194_1370 (size=30955) 2024-12-12T16:28:36,069 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:36,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020976065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:36,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-12T16:28:36,368 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:36,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020976367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:36,371 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:36,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020976368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:36,372 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:36,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020976368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:36,373 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:36,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59174 deadline: 1734020976369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:36,373 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:36,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020976372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:36,469 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=76, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/f3d834f71cc840ae813eaf8a8db42463 2024-12-12T16:28:36,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/c6b922adeb87435195cc3056128859e7 is 50, key is test_row_0/B:col10/1734020913609/Put/seqid=0 2024-12-12T16:28:36,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742195_1371 (size=12001) 2024-12-12T16:28:36,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-12T16:28:36,874 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:36,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020976872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:36,874 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:36,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020976874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:36,875 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:36,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020976874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:36,879 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:36,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59174 deadline: 1734020976876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:36,880 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:36,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020976877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:36,883 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/c6b922adeb87435195cc3056128859e7 2024-12-12T16:28:36,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/bb7b85fd830a4622a81cb6089f70c8f9 is 50, key is test_row_0/C:col10/1734020913609/Put/seqid=0 2024-12-12T16:28:36,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742196_1372 (size=12001) 2024-12-12T16:28:37,301 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/bb7b85fd830a4622a81cb6089f70c8f9 2024-12-12T16:28:37,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/f3d834f71cc840ae813eaf8a8db42463 as 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/f3d834f71cc840ae813eaf8a8db42463 2024-12-12T16:28:37,309 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/f3d834f71cc840ae813eaf8a8db42463, entries=150, sequenceid=76, filesize=30.2 K 2024-12-12T16:28:37,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/c6b922adeb87435195cc3056128859e7 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/c6b922adeb87435195cc3056128859e7 2024-12-12T16:28:37,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,313 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/c6b922adeb87435195cc3056128859e7, entries=150, sequenceid=76, filesize=11.7 K 2024-12-12T16:28:37,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/bb7b85fd830a4622a81cb6089f70c8f9 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/bb7b85fd830a4622a81cb6089f70c8f9 2024-12-12T16:28:37,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,328 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/bb7b85fd830a4622a81cb6089f70c8f9, entries=150, sequenceid=76, filesize=11.7 K 2024-12-12T16:28:37,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,329 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 53789b78ac3c456afdd9ca3b09fff4b9 in 1691ms, sequenceid=76, compaction requested=true 2024-12-12T16:28:37,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2538): Flush status journal for 53789b78ac3c456afdd9ca3b09fff4b9: 2024-12-12T16:28:37,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 
2024-12-12T16:28:37,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=109 2024-12-12T16:28:37,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4106): Remote procedure done, pid=109 2024-12-12T16:28:37,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,331 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=109, resume processing ppid=108 2024-12-12T16:28:37,331 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8460 sec 2024-12-12T16:28:37,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,333 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees in 1.8510 sec 2024-12-12T16:28:37,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... identical DEBUG entry (storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker) repeated continuously by RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 on port 41933 from 2024-12-12T16:28:37,407 through 2024-12-12T16:28:37,477 ...]
2024-12-12T16:28:37,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[identical DEBUG entries from storefiletracker.StoreFileTrackerFactory(122) — "instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" — repeat continuously from 2024-12-12T16:28:37,517 through 2024-12-12T16:28:37,581, interleaved across RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 (queue=0, port=41933)]
2024-12-12T16:28:37,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T16:28:37,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108
2024-12-12T16:28:37,587 INFO [Thread-1616 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 108 completed
2024-12-12T16:28:37,588 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-12-12T16:28:37,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees
2024-12-12T16:28:37,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110
2024-12-12T16:28:37,590 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-12-12T16:28:37,591 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-12T16:28:37,591 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-12T16:28:37,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110
2024-12-12T16:28:37,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T16:28:37,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T16:28:37,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,743 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:37,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,743 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-12-12T16:28:37,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 
2024-12-12T16:28:37,744 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2837): Flushing 53789b78ac3c456afdd9ca3b09fff4b9 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T16:28:37,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53789b78ac3c456afdd9ca3b09fff4b9, store=A 2024-12-12T16:28:37,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:37,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53789b78ac3c456afdd9ca3b09fff4b9, store=B 2024-12-12T16:28:37,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:37,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53789b78ac3c456afdd9ca3b09fff4b9, store=C 2024-12-12T16:28:37,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:37,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412126eecd7d0aeae41e0a257a3c7bbb7f178_53789b78ac3c456afdd9ca3b09fff4b9 is 50, key is test_row_1/A:col10/1734020915747/Put/seqid=0 2024-12-12T16:28:37,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T16:28:37,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T16:28:37,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T16:28:37,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T16:28:37,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T16:28:37,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742197_1373 (size=9714) 2024-12-12T16:28:37,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,781 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T16:28:37,784 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412126eecd7d0aeae41e0a257a3c7bbb7f178_53789b78ac3c456afdd9ca3b09fff4b9 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412126eecd7d0aeae41e0a257a3c7bbb7f178_53789b78ac3c456afdd9ca3b09fff4b9
2024-12-12T16:28:37,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T16:28:37,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/607a440527b643baa82714a4bca42611, store: [table=TestAcidGuarantees family=A region=53789b78ac3c456afdd9ca3b09fff4b9]
2024-12-12T16:28:37,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T16:28:37,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/607a440527b643baa82714a4bca42611 is 175, key is test_row_1/A:col10/1734020915747/Put/seqid=0
2024-12-12T16:28:37,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,788 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,790 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,793 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,795 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,798 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T16:28:37,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742198_1374 (size=22361)
2024-12-12T16:28:37,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T16:28:37,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110
2024-12-12T16:28:37,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 53789b78ac3c456afdd9ca3b09fff4b9
2024-12-12T16:28:37,894 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. as already flushing
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:37,952 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:37,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020977939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:37,952 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:37,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020977940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:37,953 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:37,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59174 deadline: 1734020977941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:37,954 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:37,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020977945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:37,963 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:37,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020977952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:38,057 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:38,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020978053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:38,057 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:38,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020978053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:38,058 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:38,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59174 deadline: 1734020978053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:38,058 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:38,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020978055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:38,067 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:38,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020978064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:38,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-12T16:28:38,200 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=87, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/607a440527b643baa82714a4bca42611 2024-12-12T16:28:38,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/2d2b7db07e8f45eb9d2a9c52e39a1213 is 50, key is test_row_1/B:col10/1734020915747/Put/seqid=0 2024-12-12T16:28:38,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742199_1375 (size=9657) 2024-12-12T16:28:38,261 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:38,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020978258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:38,261 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:38,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020978259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:38,262 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:38,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020978259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:38,262 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:38,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59174 deadline: 1734020978260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:38,271 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:38,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020978268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:38,563 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:38,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020978562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:38,567 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:38,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59174 deadline: 1734020978563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:38,567 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:38,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020978563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:38,568 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:38,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020978564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:38,573 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:38,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020978573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:38,615 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=87 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/2d2b7db07e8f45eb9d2a9c52e39a1213 2024-12-12T16:28:38,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/5d5a62756124418b9fa56ced489c5ad1 is 50, key is test_row_1/C:col10/1734020915747/Put/seqid=0 2024-12-12T16:28:38,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742200_1376 (size=9657) 2024-12-12T16:28:38,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-12T16:28:39,030 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=87 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/5d5a62756124418b9fa56ced489c5ad1 2024-12-12T16:28:39,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/607a440527b643baa82714a4bca42611 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/607a440527b643baa82714a4bca42611
2024-12-12T16:28:39,038 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/607a440527b643baa82714a4bca42611, entries=100, sequenceid=87, filesize=21.8 K
2024-12-12T16:28:39,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/2d2b7db07e8f45eb9d2a9c52e39a1213 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/2d2b7db07e8f45eb9d2a9c52e39a1213
2024-12-12T16:28:39,043 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/2d2b7db07e8f45eb9d2a9c52e39a1213, entries=100, sequenceid=87, filesize=9.4 K
2024-12-12T16:28:39,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/5d5a62756124418b9fa56ced489c5ad1 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/5d5a62756124418b9fa56ced489c5ad1
2024-12-12T16:28:39,048 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/5d5a62756124418b9fa56ced489c5ad1, entries=100, sequenceid=87, filesize=9.4 K
2024-12-12T16:28:39,049 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 53789b78ac3c456afdd9ca3b09fff4b9 in 1306ms, sequenceid=87, compaction requested=true
2024-12-12T16:28:39,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2538): Flush status journal for 53789b78ac3c456afdd9ca3b09fff4b9:
2024-12-12T16:28:39,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.
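The repeated RegionTooBusyException entries in this run ("Over memstore limit=512.0 K") are HRegion.checkResources rejecting puts while the region's memstore sits above its blocking threshold, which HBase derives from hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The Java sketch below only illustrates that arithmetic; it is not taken from the test source, and the 128 KB flush size is an assumed value chosen so the product matches the 512.0 K limit reported in these messages.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed test-sized flush threshold (the production default is 128 MB);
    // 128 KB is a guess that reproduces the 512.0 K blocking limit in this log.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // Writes to a region are blocked once its memstore exceeds
    // flush.size * block.multiplier.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = flushSize * multiplier; // 131072 * 4 = 524288 bytes = 512.0 K

    // While the memstore is above this limit, HRegion.checkResources requests a
    // flush and throws RegionTooBusyException ("Over memstore limit=..."), which
    // is what the RpcServer handlers keep logging above until a flush completes.
    System.out.println("blocking memstore limit = " + blockingLimit + " bytes");
  }
}

Consistent with that, each burst of these warnings in the log subsides once the corresponding flush frees memstore space, and a new burst begins when the writers fill it again.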
2024-12-12T16:28:39,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=111 2024-12-12T16:28:39,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4106): Remote procedure done, pid=111 2024-12-12T16:28:39,052 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=111, resume processing ppid=110 2024-12-12T16:28:39,052 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4600 sec 2024-12-12T16:28:39,053 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees in 1.4640 sec 2024-12-12T16:28:39,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:28:39,075 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 53789b78ac3c456afdd9ca3b09fff4b9 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-12T16:28:39,075 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53789b78ac3c456afdd9ca3b09fff4b9, store=A 2024-12-12T16:28:39,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:39,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53789b78ac3c456afdd9ca3b09fff4b9, store=B 2024-12-12T16:28:39,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:39,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53789b78ac3c456afdd9ca3b09fff4b9, store=C 2024-12-12T16:28:39,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:39,084 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212d0d4057f19974e02a113af907facc93d_53789b78ac3c456afdd9ca3b09fff4b9 is 50, key is test_row_0/A:col10/1734020919075/Put/seqid=0 2024-12-12T16:28:39,087 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:39,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020979078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:39,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742201_1377 (size=17034) 2024-12-12T16:28:39,093 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:39,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020979084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:39,098 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:39,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020979085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:39,098 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:39,098 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:39,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020979086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:39,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59174 deadline: 1734020979087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:39,192 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:39,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020979188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:39,201 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:39,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020979194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:39,207 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:39,207 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:39,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020979199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:39,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020979199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:39,207 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:39,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59174 deadline: 1734020979199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:39,216 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-12T16:28:39,216 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-12-12T16:28:39,395 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:39,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020979393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:39,406 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:39,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020979402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:39,413 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:39,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59174 deadline: 1734020979408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:39,413 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:39,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020979409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:39,413 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:39,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020979409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:39,488 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:39,492 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212d0d4057f19974e02a113af907facc93d_53789b78ac3c456afdd9ca3b09fff4b9 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212d0d4057f19974e02a113af907facc93d_53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:28:39,493 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/f7a3d20c94c6464ea2c065eb29dc3d12, store: [table=TestAcidGuarantees family=A region=53789b78ac3c456afdd9ca3b09fff4b9] 2024-12-12T16:28:39,494 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/f7a3d20c94c6464ea2c065eb29dc3d12 is 175, key is test_row_0/A:col10/1734020919075/Put/seqid=0 2024-12-12T16:28:39,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742202_1378 (size=48139) 2024-12-12T16:28:39,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking 
to see if procedure is done pid=110 2024-12-12T16:28:39,694 INFO [Thread-1616 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 110 completed 2024-12-12T16:28:39,696 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T16:28:39,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees 2024-12-12T16:28:39,697 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T16:28:39,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-12T16:28:39,698 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T16:28:39,698 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T16:28:39,702 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:39,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020979698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:39,713 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:39,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020979709, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:39,716 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:39,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59174 deadline: 1734020979714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:39,720 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:39,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020979715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:39,720 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:39,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020979715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:39,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-12T16:28:39,850 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:39,850 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-12T16:28:39,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:39,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. as already flushing 2024-12-12T16:28:39,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:39,851 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:39,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:39,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:39,899 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=114, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/f7a3d20c94c6464ea2c065eb29dc3d12 2024-12-12T16:28:39,912 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/ac9f02d86d784dbfaf8c009a0cded22d is 50, key is test_row_0/B:col10/1734020919075/Put/seqid=0 2024-12-12T16:28:39,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742203_1379 (size=12001) 2024-12-12T16:28:39,918 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/ac9f02d86d784dbfaf8c009a0cded22d 2024-12-12T16:28:39,930 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/bf26a44e092b40e797bf8fdc9b1b03d3 is 50, key is test_row_0/C:col10/1734020919075/Put/seqid=0 2024-12-12T16:28:39,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742204_1380 (size=12001) 2024-12-12T16:28:39,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done 
pid=112 2024-12-12T16:28:40,003 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:40,003 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-12T16:28:40,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:40,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. as already flushing 2024-12-12T16:28:40,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:40,004 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:40,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:28:40,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:40,156 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:40,156 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-12T16:28:40,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:40,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 
as already flushing 2024-12-12T16:28:40,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:40,157 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:40,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:40,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:40,209 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:40,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020980206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:40,219 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:40,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020980215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:40,222 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:40,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59174 deadline: 1734020980218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:40,228 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:40,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020980225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:40,228 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:40,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020980226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:40,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-12T16:28:40,309 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:40,309 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-12T16:28:40,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:40,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. as already flushing 2024-12-12T16:28:40,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:40,310 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:40,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:40,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:40,350 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/bf26a44e092b40e797bf8fdc9b1b03d3 2024-12-12T16:28:40,355 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/f7a3d20c94c6464ea2c065eb29dc3d12 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/f7a3d20c94c6464ea2c065eb29dc3d12 2024-12-12T16:28:40,359 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/f7a3d20c94c6464ea2c065eb29dc3d12, entries=250, sequenceid=114, filesize=47.0 K 2024-12-12T16:28:40,360 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/ac9f02d86d784dbfaf8c009a0cded22d as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/ac9f02d86d784dbfaf8c009a0cded22d 2024-12-12T16:28:40,364 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/ac9f02d86d784dbfaf8c009a0cded22d, entries=150, 
sequenceid=114, filesize=11.7 K 2024-12-12T16:28:40,364 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/bf26a44e092b40e797bf8fdc9b1b03d3 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/bf26a44e092b40e797bf8fdc9b1b03d3 2024-12-12T16:28:40,368 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/bf26a44e092b40e797bf8fdc9b1b03d3, entries=150, sequenceid=114, filesize=11.7 K 2024-12-12T16:28:40,369 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 53789b78ac3c456afdd9ca3b09fff4b9 in 1295ms, sequenceid=114, compaction requested=true 2024-12-12T16:28:40,369 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 53789b78ac3c456afdd9ca3b09fff4b9: 2024-12-12T16:28:40,369 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53789b78ac3c456afdd9ca3b09fff4b9:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T16:28:40,369 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 0 compacting, 6 eligible, 16 blocking 2024-12-12T16:28:40,369 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:40,369 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 0 compacting, 6 eligible, 16 blocking 2024-12-12T16:28:40,370 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53789b78ac3c456afdd9ca3b09fff4b9:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T16:28:40,370 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:40,370 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53789b78ac3c456afdd9ca3b09fff4b9:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T16:28:40,370 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:28:40,371 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 6 files of size 194320 starting at candidate #0 after considering 10 permutations with 10 in ratio 2024-12-12T16:28:40,371 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 6 files of size 69662 starting at candidate #0 after considering 10 permutations with 10 in ratio 2024-12-12T16:28:40,371 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] 
regionserver.HStore(1540): 53789b78ac3c456afdd9ca3b09fff4b9/A is initiating minor compaction (all files) 2024-12-12T16:28:40,371 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): 53789b78ac3c456afdd9ca3b09fff4b9/B is initiating minor compaction (all files) 2024-12-12T16:28:40,371 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53789b78ac3c456afdd9ca3b09fff4b9/A in TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:40,371 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53789b78ac3c456afdd9ca3b09fff4b9/B in TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:40,371 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/cc060933e4894e4a9fd02eb9f1a3b124, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/ee56b8e7d56946dbbe6dd5d1237f8e97, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/fcf822a41aca41de8be14bb5671c4a55, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/c6b922adeb87435195cc3056128859e7, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/2d2b7db07e8f45eb9d2a9c52e39a1213, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/ac9f02d86d784dbfaf8c009a0cded22d] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp, totalSize=68.0 K 2024-12-12T16:28:40,371 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/571dc34ce88f4262b8c75d180ab4eb0d, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/ba80a12c0b8f49bdbe5f3e72912b37d3, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/a8af27310a8f44c38729f3c8101c73d7, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/f3d834f71cc840ae813eaf8a8db42463, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/607a440527b643baa82714a4bca42611, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/f7a3d20c94c6464ea2c065eb29dc3d12] into 
tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp, totalSize=189.8 K 2024-12-12T16:28:40,372 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=10 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:40,372 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. files: [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/571dc34ce88f4262b8c75d180ab4eb0d, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/ba80a12c0b8f49bdbe5f3e72912b37d3, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/a8af27310a8f44c38729f3c8101c73d7, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/f3d834f71cc840ae813eaf8a8db42463, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/607a440527b643baa82714a4bca42611, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/f7a3d20c94c6464ea2c065eb29dc3d12] 2024-12-12T16:28:40,372 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting cc060933e4894e4a9fd02eb9f1a3b124, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1734020911280 2024-12-12T16:28:40,372 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 571dc34ce88f4262b8c75d180ab4eb0d, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1734020911280 2024-12-12T16:28:40,372 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting ee56b8e7d56946dbbe6dd5d1237f8e97, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1734020911332 2024-12-12T16:28:40,373 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting ba80a12c0b8f49bdbe5f3e72912b37d3, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1734020911332 2024-12-12T16:28:40,373 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting fcf822a41aca41de8be14bb5671c4a55, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1734020912452 2024-12-12T16:28:40,373 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting a8af27310a8f44c38729f3c8101c73d7, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1734020912452 2024-12-12T16:28:40,373 DEBUG 
[RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting c6b922adeb87435195cc3056128859e7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1734020913597 2024-12-12T16:28:40,373 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting f3d834f71cc840ae813eaf8a8db42463, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1734020913597 2024-12-12T16:28:40,373 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 2d2b7db07e8f45eb9d2a9c52e39a1213, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=87, earliestPutTs=1734020915747 2024-12-12T16:28:40,374 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 607a440527b643baa82714a4bca42611, keycount=100, bloomtype=ROW, size=21.8 K, encoding=NONE, compression=NONE, seqNum=87, earliestPutTs=1734020915747 2024-12-12T16:28:40,374 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting ac9f02d86d784dbfaf8c009a0cded22d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1734020917939 2024-12-12T16:28:40,374 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting f7a3d20c94c6464ea2c065eb29dc3d12, keycount=250, bloomtype=ROW, size=47.0 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1734020917939 2024-12-12T16:28:40,386 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 53789b78ac3c456afdd9ca3b09fff4b9#B#compaction#324 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:28:40,386 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/9657f6dab85443008b451cc18e486b87 is 50, key is test_row_0/B:col10/1734020919075/Put/seqid=0 2024-12-12T16:28:40,389 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=53789b78ac3c456afdd9ca3b09fff4b9] 2024-12-12T16:28:40,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742205_1381 (size=12207) 2024-12-12T16:28:40,396 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121273e87fe863154ce8b2bf0a86db5374c2_53789b78ac3c456afdd9ca3b09fff4b9 store=[table=TestAcidGuarantees family=A region=53789b78ac3c456afdd9ca3b09fff4b9] 2024-12-12T16:28:40,398 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121273e87fe863154ce8b2bf0a86db5374c2_53789b78ac3c456afdd9ca3b09fff4b9, store=[table=TestAcidGuarantees family=A region=53789b78ac3c456afdd9ca3b09fff4b9] 2024-12-12T16:28:40,398 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121273e87fe863154ce8b2bf0a86db5374c2_53789b78ac3c456afdd9ca3b09fff4b9 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=53789b78ac3c456afdd9ca3b09fff4b9] 2024-12-12T16:28:40,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742206_1382 (size=4469) 2024-12-12T16:28:40,462 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:40,462 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-12T16:28:40,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 
2024-12-12T16:28:40,463 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2837): Flushing 53789b78ac3c456afdd9ca3b09fff4b9 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-12-12T16:28:40,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53789b78ac3c456afdd9ca3b09fff4b9, store=A 2024-12-12T16:28:40,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:40,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53789b78ac3c456afdd9ca3b09fff4b9, store=B 2024-12-12T16:28:40,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:40,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53789b78ac3c456afdd9ca3b09fff4b9, store=C 2024-12-12T16:28:40,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:40,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212275b5407e97c4f8780d050d1606d81e4_53789b78ac3c456afdd9ca3b09fff4b9 is 50, key is test_row_0/A:col10/1734020919085/Put/seqid=0 2024-12-12T16:28:40,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742207_1383 (size=12154) 2024-12-12T16:28:40,799 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/9657f6dab85443008b451cc18e486b87 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/9657f6dab85443008b451cc18e486b87 2024-12-12T16:28:40,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-12T16:28:40,804 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 6 (all) file(s) in 53789b78ac3c456afdd9ca3b09fff4b9/B of 53789b78ac3c456afdd9ca3b09fff4b9 into 9657f6dab85443008b451cc18e486b87(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T16:28:40,804 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53789b78ac3c456afdd9ca3b09fff4b9: 2024-12-12T16:28:40,804 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9., storeName=53789b78ac3c456afdd9ca3b09fff4b9/B, priority=10, startTime=1734020920369; duration=0sec 2024-12-12T16:28:40,804 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:28:40,804 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53789b78ac3c456afdd9ca3b09fff4b9:B 2024-12-12T16:28:40,804 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 0 compacting, 6 eligible, 16 blocking 2024-12-12T16:28:40,806 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 6 files of size 69662 starting at candidate #0 after considering 10 permutations with 10 in ratio 2024-12-12T16:28:40,806 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): 53789b78ac3c456afdd9ca3b09fff4b9/C is initiating minor compaction (all files) 2024-12-12T16:28:40,806 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53789b78ac3c456afdd9ca3b09fff4b9/C in TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:40,806 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/fb36588fe9374dec920e6baf9983c044, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/75b0c29b87124b04944401002787db7b, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/3a2e4a472846441cb8a8a0fc90bc1a8e, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/bb7b85fd830a4622a81cb6089f70c8f9, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/5d5a62756124418b9fa56ced489c5ad1, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/bf26a44e092b40e797bf8fdc9b1b03d3] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp, totalSize=68.0 K 2024-12-12T16:28:40,807 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting fb36588fe9374dec920e6baf9983c044, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1734020911280 2024-12-12T16:28:40,807 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): 
Compacting 75b0c29b87124b04944401002787db7b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1734020911332 2024-12-12T16:28:40,807 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 3a2e4a472846441cb8a8a0fc90bc1a8e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1734020912452 2024-12-12T16:28:40,808 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting bb7b85fd830a4622a81cb6089f70c8f9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1734020913597 2024-12-12T16:28:40,808 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 5d5a62756124418b9fa56ced489c5ad1, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=87, earliestPutTs=1734020915747 2024-12-12T16:28:40,808 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting bf26a44e092b40e797bf8fdc9b1b03d3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1734020917939 2024-12-12T16:28:40,810 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 53789b78ac3c456afdd9ca3b09fff4b9#A#compaction#325 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:28:40,811 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/52382815b9b2473ea0d9f17dcf23b2a8 is 175, key is test_row_0/A:col10/1734020919075/Put/seqid=0 2024-12-12T16:28:40,827 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 53789b78ac3c456afdd9ca3b09fff4b9#C#compaction#327 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:28:40,828 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/2cddaf44036046fd8f7d9eeda8b87736 is 50, key is test_row_0/C:col10/1734020919075/Put/seqid=0 2024-12-12T16:28:40,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742208_1384 (size=31161) 2024-12-12T16:28:40,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742209_1385 (size=12207) 2024-12-12T16:28:40,854 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/2cddaf44036046fd8f7d9eeda8b87736 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/2cddaf44036046fd8f7d9eeda8b87736 2024-12-12T16:28:40,860 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 6 (all) file(s) in 53789b78ac3c456afdd9ca3b09fff4b9/C of 53789b78ac3c456afdd9ca3b09fff4b9 into 2cddaf44036046fd8f7d9eeda8b87736(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:28:40,860 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53789b78ac3c456afdd9ca3b09fff4b9: 2024-12-12T16:28:40,860 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9., storeName=53789b78ac3c456afdd9ca3b09fff4b9/C, priority=10, startTime=1734020920370; duration=0sec 2024-12-12T16:28:40,860 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:40,860 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53789b78ac3c456afdd9ca3b09fff4b9:C 2024-12-12T16:28:40,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:40,887 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212275b5407e97c4f8780d050d1606d81e4_53789b78ac3c456afdd9ca3b09fff4b9 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212275b5407e97c4f8780d050d1606d81e4_53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:28:40,888 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/5600e7a7640a45f8bb186b02f8511eea, store: [table=TestAcidGuarantees family=A region=53789b78ac3c456afdd9ca3b09fff4b9] 2024-12-12T16:28:40,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/5600e7a7640a45f8bb186b02f8511eea is 175, key is test_row_0/A:col10/1734020919085/Put/seqid=0 2024-12-12T16:28:40,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742210_1386 (size=30955) 2024-12-12T16:28:40,896 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=123, memsize=13.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/5600e7a7640a45f8bb186b02f8511eea 2024-12-12T16:28:40,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/270de29f5e9047508fc5c6b1d683b3d7 is 50, key is test_row_0/B:col10/1734020919085/Put/seqid=0 2024-12-12T16:28:40,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742211_1387 (size=12001) 2024-12-12T16:28:40,927 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=123 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/270de29f5e9047508fc5c6b1d683b3d7 2024-12-12T16:28:40,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/e27d2ea20ead4cceabad91e33d6cee14 is 50, key is test_row_0/C:col10/1734020919085/Put/seqid=0 2024-12-12T16:28:40,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742212_1388 (size=12001) 2024-12-12T16:28:41,220 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 
as already flushing 2024-12-12T16:28:41,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:28:41,239 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/52382815b9b2473ea0d9f17dcf23b2a8 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/52382815b9b2473ea0d9f17dcf23b2a8 2024-12-12T16:28:41,243 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 6 (all) file(s) in 53789b78ac3c456afdd9ca3b09fff4b9/A of 53789b78ac3c456afdd9ca3b09fff4b9 into 52382815b9b2473ea0d9f17dcf23b2a8(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:28:41,243 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53789b78ac3c456afdd9ca3b09fff4b9: 2024-12-12T16:28:41,243 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9., storeName=53789b78ac3c456afdd9ca3b09fff4b9/A, priority=10, startTime=1734020920369; duration=0sec 2024-12-12T16:28:41,243 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:41,243 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53789b78ac3c456afdd9ca3b09fff4b9:A 2024-12-12T16:28:41,260 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:41,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020981255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:41,260 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:41,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020981256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:41,263 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:41,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59174 deadline: 1734020981258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:41,263 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:41,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020981259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:41,264 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:41,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020981260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:41,346 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=123 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/e27d2ea20ead4cceabad91e33d6cee14 2024-12-12T16:28:41,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/5600e7a7640a45f8bb186b02f8511eea as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/5600e7a7640a45f8bb186b02f8511eea 2024-12-12T16:28:41,354 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/5600e7a7640a45f8bb186b02f8511eea, entries=150, sequenceid=123, filesize=30.2 K 2024-12-12T16:28:41,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/270de29f5e9047508fc5c6b1d683b3d7 as 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/270de29f5e9047508fc5c6b1d683b3d7 2024-12-12T16:28:41,360 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/270de29f5e9047508fc5c6b1d683b3d7, entries=150, sequenceid=123, filesize=11.7 K 2024-12-12T16:28:41,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/e27d2ea20ead4cceabad91e33d6cee14 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/e27d2ea20ead4cceabad91e33d6cee14 2024-12-12T16:28:41,363 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:41,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020981361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:41,364 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:41,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020981362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:41,366 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/e27d2ea20ead4cceabad91e33d6cee14, entries=150, sequenceid=123, filesize=11.7 K 2024-12-12T16:28:41,367 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=161.02 KB/164880 for 53789b78ac3c456afdd9ca3b09fff4b9 in 904ms, sequenceid=123, compaction requested=false 2024-12-12T16:28:41,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2538): Flush status journal for 53789b78ac3c456afdd9ca3b09fff4b9: 2024-12-12T16:28:41,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 
2024-12-12T16:28:41,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=113 2024-12-12T16:28:41,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4106): Remote procedure done, pid=113 2024-12-12T16:28:41,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:28:41,369 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 53789b78ac3c456afdd9ca3b09fff4b9 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-12-12T16:28:41,370 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=112 2024-12-12T16:28:41,370 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6700 sec 2024-12-12T16:28:41,370 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53789b78ac3c456afdd9ca3b09fff4b9, store=A 2024-12-12T16:28:41,370 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:41,370 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53789b78ac3c456afdd9ca3b09fff4b9, store=B 2024-12-12T16:28:41,370 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:41,370 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53789b78ac3c456afdd9ca3b09fff4b9, store=C 2024-12-12T16:28:41,370 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:41,372 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees in 1.6750 sec 2024-12-12T16:28:41,377 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:41,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59174 deadline: 1734020981375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:41,379 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212ac2bd4619d444637a5c3542f9a3c0044_53789b78ac3c456afdd9ca3b09fff4b9 is 50, key is test_row_0/A:col10/1734020921259/Put/seqid=0 2024-12-12T16:28:41,380 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:41,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020981377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:41,382 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:41,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020981380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:41,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742213_1389 (size=17284) 2024-12-12T16:28:41,394 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:41,399 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212ac2bd4619d444637a5c3542f9a3c0044_53789b78ac3c456afdd9ca3b09fff4b9 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212ac2bd4619d444637a5c3542f9a3c0044_53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:28:41,400 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/4fb755e949244ba18793312d823cefa4, store: [table=TestAcidGuarantees family=A region=53789b78ac3c456afdd9ca3b09fff4b9] 2024-12-12T16:28:41,401 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/4fb755e949244ba18793312d823cefa4 is 175, key is test_row_0/A:col10/1734020921259/Put/seqid=0 2024-12-12T16:28:41,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is 
added to blk_1073742214_1390 (size=48389) 2024-12-12T16:28:41,413 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=155, memsize=58.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/4fb755e949244ba18793312d823cefa4 2024-12-12T16:28:41,422 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/8c08686946194b11a3861940dbbf284f is 50, key is test_row_0/B:col10/1734020921259/Put/seqid=0 2024-12-12T16:28:41,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742215_1391 (size=12151) 2024-12-12T16:28:41,435 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/8c08686946194b11a3861940dbbf284f 2024-12-12T16:28:41,443 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/33b2aadded0543ddac018f030435c932 is 50, key is test_row_0/C:col10/1734020921259/Put/seqid=0 2024-12-12T16:28:41,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742216_1392 (size=12151) 2024-12-12T16:28:41,480 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:41,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59174 deadline: 1734020981478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:41,487 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:41,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020981482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:41,487 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:41,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020981483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:41,569 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:41,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020981566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:41,569 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:41,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020981566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:41,685 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:41,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59174 deadline: 1734020981682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:41,692 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:41,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020981688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:41,692 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:41,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020981689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:41,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-12T16:28:41,802 INFO [Thread-1616 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 112 completed 2024-12-12T16:28:41,803 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T16:28:41,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees 2024-12-12T16:28:41,805 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T16:28:41,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-12T16:28:41,806 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T16:28:41,806 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T16:28:41,849 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/33b2aadded0543ddac018f030435c932 2024-12-12T16:28:41,854 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/4fb755e949244ba18793312d823cefa4 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/4fb755e949244ba18793312d823cefa4 2024-12-12T16:28:41,858 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/4fb755e949244ba18793312d823cefa4, entries=250, sequenceid=155, filesize=47.3 K 2024-12-12T16:28:41,859 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/8c08686946194b11a3861940dbbf284f as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/8c08686946194b11a3861940dbbf284f 2024-12-12T16:28:41,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:41,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:41,863 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/8c08686946194b11a3861940dbbf284f, entries=150, sequenceid=155, filesize=11.9 K 2024-12-12T16:28:41,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:41,864 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/33b2aadded0543ddac018f030435c932 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/33b2aadded0543ddac018f030435c932 2024-12-12T16:28:41,868 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/33b2aadded0543ddac018f030435c932, entries=150, sequenceid=155, filesize=11.9 K 2024-12-12T16:28:41,868 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=26.84 KB/27480 for 53789b78ac3c456afdd9ca3b09fff4b9 in 499ms, sequenceid=155, compaction requested=true 2024-12-12T16:28:41,869 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 53789b78ac3c456afdd9ca3b09fff4b9: 2024-12-12T16:28:41,869 DEBUG 
[RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:28:41,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53789b78ac3c456afdd9ca3b09fff4b9:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T16:28:41,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:41,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53789b78ac3c456afdd9ca3b09fff4b9:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T16:28:41,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:41,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53789b78ac3c456afdd9ca3b09fff4b9:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T16:28:41,869 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:28:41,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:28:41,870 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 110505 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:28:41,870 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): 53789b78ac3c456afdd9ca3b09fff4b9/A is initiating minor compaction (all files) 2024-12-12T16:28:41,870 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53789b78ac3c456afdd9ca3b09fff4b9/A in TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 
2024-12-12T16:28:41,870 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:28:41,870 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/52382815b9b2473ea0d9f17dcf23b2a8, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/5600e7a7640a45f8bb186b02f8511eea, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/4fb755e949244ba18793312d823cefa4] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp, totalSize=107.9 K 2024-12-12T16:28:41,870 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): 53789b78ac3c456afdd9ca3b09fff4b9/B is initiating minor compaction (all files) 2024-12-12T16:28:41,870 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:41,870 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53789b78ac3c456afdd9ca3b09fff4b9/B in TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:41,870 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 
files: [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/52382815b9b2473ea0d9f17dcf23b2a8, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/5600e7a7640a45f8bb186b02f8511eea, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/4fb755e949244ba18793312d823cefa4] 2024-12-12T16:28:41,870 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/9657f6dab85443008b451cc18e486b87, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/270de29f5e9047508fc5c6b1d683b3d7, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/8c08686946194b11a3861940dbbf284f] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp, totalSize=35.5 K 2024-12-12T16:28:41,871 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 52382815b9b2473ea0d9f17dcf23b2a8, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1734020917939 2024-12-12T16:28:41,871 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 9657f6dab85443008b451cc18e486b87, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1734020917939 2024-12-12T16:28:41,871 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5600e7a7640a45f8bb186b02f8511eea, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1734020919083 2024-12-12T16:28:41,871 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 270de29f5e9047508fc5c6b1d683b3d7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1734020919083 2024-12-12T16:28:41,871 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 8c08686946194b11a3861940dbbf284f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1734020921259 2024-12-12T16:28:41,871 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4fb755e949244ba18793312d823cefa4, keycount=250, bloomtype=ROW, size=47.3 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1734020921250 2024-12-12T16:28:41,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:41,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:41,872 DEBUG 
2024-12-12T16:28:41,887 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 53789b78ac3c456afdd9ca3b09fff4b9#B#compaction#333 average throughput is 0.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-12T16:28:41,887 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/fc432ca2852b4154a65e620b655088df is 50, key is test_row_0/B:col10/1734020921259/Put/seqid=0
2024-12-12T16:28:41,890 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=53789b78ac3c456afdd9ca3b09fff4b9]
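The PressureAwareThroughputController lines report compaction write throughput against a global limit (50.00 MB/second here) and how often the compactor slept to stay under it. The snippet below is a simplified, illustrative byte-rate throttle in that spirit; it is not HBase's implementation, and the class name is made up for the example:

// Illustrative only: sleep whenever bytes written run ahead of the allowed rate.
public class SimpleThroughputThrottle {
  private final double bytesPerSecond;
  private final long startNanos = System.nanoTime();
  private long bytesWritten;

  public SimpleThroughputThrottle(double bytesPerSecond) {
    this.bytesPerSecond = bytesPerSecond;
  }

  /** Call after writing a chunk; sleeps if we are ahead of the configured rate. */
  public void control(long chunkBytes) throws InterruptedException {
    bytesWritten += chunkBytes;
    double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
    double earliestAllowedSec = bytesWritten / bytesPerSecond;
    if (earliestAllowedSec > elapsedSec) {
      Thread.sleep((long) ((earliestAllowedSec - elapsedSec) * 1000));
    }
  }
}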
2024-12-12T16:28:41,901 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412126b81b677b0264c9eb9d80b0cebb44c62_53789b78ac3c456afdd9ca3b09fff4b9 store=[table=TestAcidGuarantees family=A region=53789b78ac3c456afdd9ca3b09fff4b9]
2024-12-12T16:28:41,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114
2024-12-12T16:28:41,907 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412126b81b677b0264c9eb9d80b0cebb44c62_53789b78ac3c456afdd9ca3b09fff4b9, store=[table=TestAcidGuarantees family=A region=53789b78ac3c456afdd9ca3b09fff4b9]
2024-12-12T16:28:41,907 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412126b81b677b0264c9eb9d80b0cebb44c62_53789b78ac3c456afdd9ca3b09fff4b9 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=53789b78ac3c456afdd9ca3b09fff4b9]
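The DefaultMobStoreCompactor entries show the MOB-aware compactor opening and then aborting a MOB writer because family A holds no MOB cells (Compact MOB=false). For context, a minimal sketch of how a column family would opt into MOB storage in the first place, using the public descriptor-builder API; the class name and the 100 KB threshold are arbitrary example choices, not taken from this test:

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilyExample {
  public static ColumnFamilyDescriptor mobFamily() {
    // Cells larger than the threshold (in bytes) are written as MOB files;
    // families without this flag take the "no MOB cells" path seen in the log.
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
        .setMobEnabled(true)
        .setMobThreshold(100 * 1024)   // 100 KB, illustrative value
        .build();
  }
}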
2024-12-12T16:28:41,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742218_1394 (size=4469)
2024-12-12T16:28:41,929 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 53789b78ac3c456afdd9ca3b09fff4b9#A#compaction#334 average throughput is 0.63 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-12T16:28:41,930 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/db48d4d6a9114a38ad91e1a0eac5e1a5 is 175, key is test_row_0/A:col10/1734020921259/Put/seqid=0
2024-12-12T16:28:41,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742217_1393 (size=12459)
2024-12-12T16:28:41,942 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/fc432ca2852b4154a65e620b655088df as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/fc432ca2852b4154a65e620b655088df
2024-12-12T16:28:41,947 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 53789b78ac3c456afdd9ca3b09fff4b9/B of 53789b78ac3c456afdd9ca3b09fff4b9 into fc432ca2852b4154a65e620b655088df(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-12T16:28:41,947 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53789b78ac3c456afdd9ca3b09fff4b9:
2024-12-12T16:28:41,947 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9., storeName=53789b78ac3c456afdd9ca3b09fff4b9/B, priority=13, startTime=1734020921869; duration=0sec
2024-12-12T16:28:41,947 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-12-12T16:28:41,947 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53789b78ac3c456afdd9ca3b09fff4b9:B
2024-12-12T16:28:41,947 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-12T16:28:41,949 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-12T16:28:41,949 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): 53789b78ac3c456afdd9ca3b09fff4b9/C is initiating minor compaction (all files)
2024-12-12T16:28:41,949 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53789b78ac3c456afdd9ca3b09fff4b9/C in TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.
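The SortedCompactionPolicy and ExploringCompactionPolicy lines record minor-compaction selection: three eligible files, one permutation considered, all of them "in ratio". The check below is a generic illustration of that ratio test (each candidate file must not exceed the combined size of the other files times a configurable ratio); it is not the actual HBase algorithm, the class name is invented for the example, and 1.2 is only the commonly cited default ratio:

import java.util.List;

public class RatioCheck {
  /** Returns true if every file is no larger than ratio * (sum of the other files). */
  public static boolean withinRatio(List<Long> sizes, double ratio) {
    long total = sizes.stream().mapToLong(Long::longValue).sum();
    for (long size : sizes) {
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Three sizes that sum to 36359 bytes, matching the selected-files total above.
    System.out.println(withinRatio(List.of(12_186L, 11_994L, 12_179L), 1.2)); // true
  }
}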
2024-12-12T16:28:41,949 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/2cddaf44036046fd8f7d9eeda8b87736, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/e27d2ea20ead4cceabad91e33d6cee14, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/33b2aadded0543ddac018f030435c932] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp, totalSize=35.5 K
2024-12-12T16:28:41,949 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 2cddaf44036046fd8f7d9eeda8b87736, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1734020917939
2024-12-12T16:28:41,950 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting e27d2ea20ead4cceabad91e33d6cee14, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1734020919083
2024-12-12T16:28:41,950 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 33b2aadded0543ddac018f030435c932, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1734020921259
2024-12-12T16:28:41,957 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476
2024-12-12T16:28:41,958 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115
2024-12-12T16:28:41,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.
2024-12-12T16:28:41,958 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2837): Flushing 53789b78ac3c456afdd9ca3b09fff4b9 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB
2024-12-12T16:28:41,961 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 53789b78ac3c456afdd9ca3b09fff4b9#C#compaction#335 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-12T16:28:41,961 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/1b2c720731834391b0486f199e1fb468 is 50, key is test_row_0/C:col10/1734020921259/Put/seqid=0
2024-12-12T16:28:41,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53789b78ac3c456afdd9ca3b09fff4b9, store=A
2024-12-12T16:28:41,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-12T16:28:41,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53789b78ac3c456afdd9ca3b09fff4b9, store=B
2024-12-12T16:28:41,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-12T16:28:41,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53789b78ac3c456afdd9ca3b09fff4b9, store=C
2024-12-12T16:28:41,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
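The FlushRegionCallable / RS_FLUSH_REGIONS entries for pid=115 and the "Flushing 53789b78ac3c456afdd9ca3b09fff4b9 3/3 column families" line are the server side of a table flush. A minimal client-side sketch of requesting such a flush through the public Admin API is shown below; the class name is illustrative and the cluster connection settings are assumed to come from the classpath configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the region servers to flush all memstores of the table; on the server
      // side this shows up as FlushRegionCallable work like pid=115 above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}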
2024-12-12T16:28:41,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742219_1395 (size=31413)
2024-12-12T16:28:41,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:41,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:41,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:41,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:41,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:41,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:41,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:41,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742220_1396 (size=12459) 2024-12-12T16:28:41,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:41,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:41,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:41,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:41,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:41,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:41,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:41,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:41,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:41,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:41,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412127c3c7ab961f94fa29c8f48fc3266a839_53789b78ac3c456afdd9ca3b09fff4b9 is 50, key is test_row_1/A:col10/1734020921958/Put/seqid=0 2024-12-12T16:28:41,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:41,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:41,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:41,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:41,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:41,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:41,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:41,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:41,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T16:28:41,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:41,996 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/1b2c720731834391b0486f199e1fb468 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/1b2c720731834391b0486f199e1fb468 2024-12-12T16:28:41,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:41,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:41,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:41,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:41,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:42,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:42,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:42,001 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 53789b78ac3c456afdd9ca3b09fff4b9/C of 53789b78ac3c456afdd9ca3b09fff4b9 into 1b2c720731834391b0486f199e1fb468(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T16:28:42,001 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53789b78ac3c456afdd9ca3b09fff4b9:
2024-12-12T16:28:42,001 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9., storeName=53789b78ac3c456afdd9ca3b09fff4b9/C, priority=13, startTime=1734020921869; duration=0sec
2024-12-12T16:28:42,001 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-12T16:28:42,001 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53789b78ac3c456afdd9ca3b09fff4b9:C
2024-12-12T16:28:42,001-16:28:42,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0/1/2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker (repeated)
2024-12-12T16:28:42,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742221_1397 (size=14794)
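The entries above record a completed compaction of store C in region 53789b78ac3c456afdd9ca3b09fff4b9: three store files are rewritten into 1b2c720731834391b0486f199e1fb468, the result is committed out of the .tmp directory, and the CompactionRunner clears the under-compaction mark. Purely as an illustrative sketch (not code from this test run), the same flush and compaction activity can be requested for the table seen in this log through the standard HBase Admin API; the configuration is assumed to come from an hbase-site.xml on the classpath:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class AdminCompactionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();               // picks up hbase-site.xml settings
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");    // table name taken from the log
      admin.flush(table);         // flush memstores to new store files (the RS_FLUSH_REGIONS work above)
      admin.majorCompact(table);  // request a compaction that rewrites each store's files
    }
  }
}

admin.compact(table) would instead queue a regular (non-major) compaction; both calls are asynchronous requests to the region servers.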
2024-12-12T16:28:42,011-16:28:42,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0/1/2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker (repeated)
2024-12-12T16:28:42,032 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. as already flushing
2024-12-12T16:28:42,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 53789b78ac3c456afdd9ca3b09fff4b9
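"Flush requested" together with "NOT flushing ... as already flushing" means the region's memstore crossed its flush threshold again while the previous flush (pid=115 above) was still running. That threshold, and the blocking limit reported further down as "Over memstore limit=512.0 K", are configuration-driven. The sketch below only illustrates the relevant keys (hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier); the 128 KB value is an assumption chosen so the numbers line up with this log, not the actual setting used by the test:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  public static Configuration tightMemstoreLimits() {
    Configuration conf = HBaseConfiguration.create();
    // Flush a region's memstore once it reaches 128 KB (the production default is 128 MB).
    // 128 KB is an assumed value; with the multiplier below it yields a 512 KB blocking limit,
    // matching the "Over memstore limit=512.0 K" messages later in this log.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // Reject further updates with RegionTooBusyException once the memstore reaches
    // block.multiplier * flush.size (here 4 * 128 KB = 512 KB) until a flush frees space.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    return conf;
  }
}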
2024-12-12T16:28:42,032-16:28:42,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0/1/2,queue=0,port=41933 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker (repeated)
2024-12-12T16:28:42,092 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-12T16:28:42,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020982085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476
2024-12-12T16:28:42,094 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 [same stack trace as at 16:28:42,092]
2024-12-12T16:28:42,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020982086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476
2024-12-12T16:28:42,094 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 [same stack trace]
2024-12-12T16:28:42,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59174 deadline: 1734020982086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476
2024-12-12T16:28:42,095 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 [same stack trace]
2024-12-12T16:28:42,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020982088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476
2024-12-12T16:28:42,095 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 [same stack trace]
2024-12-12T16:28:42,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020982088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476
2024-12-12T16:28:42,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114
2024-12-12T16:28:42,195 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 [same stack trace]
2024-12-12T16:28:42,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020982193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476
2024-12-12T16:28:42,199 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 [same stack trace]
2024-12-12T16:28:42,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020982196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476
2024-12-12T16:28:42,200 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 [same stack trace]
2024-12-12T16:28:42,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020982197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476
2024-12-12T16:28:42,200 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:42,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020982197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:42,200 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:42,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59174 deadline: 1734020982197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:42,375 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/db48d4d6a9114a38ad91e1a0eac5e1a5 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/db48d4d6a9114a38ad91e1a0eac5e1a5 2024-12-12T16:28:42,380 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 53789b78ac3c456afdd9ca3b09fff4b9/A of 53789b78ac3c456afdd9ca3b09fff4b9 into db48d4d6a9114a38ad91e1a0eac5e1a5(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:28:42,380 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53789b78ac3c456afdd9ca3b09fff4b9: 2024-12-12T16:28:42,380 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9., storeName=53789b78ac3c456afdd9ca3b09fff4b9/A, priority=13, startTime=1734020921869; duration=0sec 2024-12-12T16:28:42,380 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:42,380 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53789b78ac3c456afdd9ca3b09fff4b9:A 2024-12-12T16:28:42,399 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:42,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020982396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:42,404 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:42,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020982401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:42,405 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:42,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020982402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:42,405 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:42,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020982402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:42,405 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:42,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59174 deadline: 1734020982402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:42,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:42,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-12T16:28:42,410 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412127c3c7ab961f94fa29c8f48fc3266a839_53789b78ac3c456afdd9ca3b09fff4b9 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412127c3c7ab961f94fa29c8f48fc3266a839_53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:28:42,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/030e5db5b59f4c2e993f86dc4d0ac386, store: [table=TestAcidGuarantees family=A region=53789b78ac3c456afdd9ca3b09fff4b9] 2024-12-12T16:28:42,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/030e5db5b59f4c2e993f86dc4d0ac386 is 175, key is test_row_1/A:col10/1734020921958/Put/seqid=0 2024-12-12T16:28:42,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742222_1398 (size=39745) 2024-12-12T16:28:42,703 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:42,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020982700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:42,707 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:42,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020982706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:42,707 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:42,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020982707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:42,710 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:42,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020982708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:42,710 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:42,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59174 deadline: 1734020982708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:42,817 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=165, memsize=13.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/030e5db5b59f4c2e993f86dc4d0ac386 2024-12-12T16:28:42,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/85964d63c52f4ee1824c56fdeea9d296 is 50, key is test_row_1/B:col10/1734020921958/Put/seqid=0 2024-12-12T16:28:42,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742223_1399 (size=9757) 2024-12-12T16:28:42,828 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=165 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/85964d63c52f4ee1824c56fdeea9d296 2024-12-12T16:28:42,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/565908d4e6aa4f3a8be622bc2982955d is 50, key is test_row_1/C:col10/1734020921958/Put/seqid=0 2024-12-12T16:28:42,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742224_1400 (size=9757) 2024-12-12T16:28:42,844 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=165 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/565908d4e6aa4f3a8be622bc2982955d 2024-12-12T16:28:42,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, 
pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/030e5db5b59f4c2e993f86dc4d0ac386 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/030e5db5b59f4c2e993f86dc4d0ac386 2024-12-12T16:28:42,857 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/030e5db5b59f4c2e993f86dc4d0ac386, entries=200, sequenceid=165, filesize=38.8 K 2024-12-12T16:28:42,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/85964d63c52f4ee1824c56fdeea9d296 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/85964d63c52f4ee1824c56fdeea9d296 2024-12-12T16:28:42,861 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/85964d63c52f4ee1824c56fdeea9d296, entries=100, sequenceid=165, filesize=9.5 K 2024-12-12T16:28:42,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/565908d4e6aa4f3a8be622bc2982955d as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/565908d4e6aa4f3a8be622bc2982955d 2024-12-12T16:28:42,866 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/565908d4e6aa4f3a8be622bc2982955d, entries=100, sequenceid=165, filesize=9.5 K 2024-12-12T16:28:42,866 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=167.72 KB/171750 for 53789b78ac3c456afdd9ca3b09fff4b9 in 908ms, sequenceid=165, compaction requested=false 2024-12-12T16:28:42,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2538): Flush status journal for 53789b78ac3c456afdd9ca3b09fff4b9: 2024-12-12T16:28:42,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 
2024-12-12T16:28:42,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=115 2024-12-12T16:28:42,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4106): Remote procedure done, pid=115 2024-12-12T16:28:42,869 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=115, resume processing ppid=114 2024-12-12T16:28:42,869 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, ppid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0620 sec 2024-12-12T16:28:42,871 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees in 1.0670 sec 2024-12-12T16:28:42,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-12T16:28:42,909 INFO [Thread-1616 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 114 completed 2024-12-12T16:28:42,910 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T16:28:42,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees 2024-12-12T16:28:42,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-12T16:28:42,912 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T16:28:42,912 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T16:28:42,912 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T16:28:43,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-12T16:28:43,064 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:43,065 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-12-12T16:28:43,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 
2024-12-12T16:28:43,065 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2837): Flushing 53789b78ac3c456afdd9ca3b09fff4b9 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-12-12T16:28:43,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53789b78ac3c456afdd9ca3b09fff4b9, store=A 2024-12-12T16:28:43,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:43,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53789b78ac3c456afdd9ca3b09fff4b9, store=B 2024-12-12T16:28:43,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:43,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53789b78ac3c456afdd9ca3b09fff4b9, store=C 2024-12-12T16:28:43,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:43,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212c423d5afe99e459cbce14156594c5436_53789b78ac3c456afdd9ca3b09fff4b9 is 50, key is test_row_0/A:col10/1734020922086/Put/seqid=0 2024-12-12T16:28:43,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742225_1401 (size=12304) 2024-12-12T16:28:43,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:28:43,211 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. as already flushing 2024-12-12T16:28:43,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-12T16:28:43,218 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:43,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59174 deadline: 1734020983214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:43,218 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:43,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020983215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:43,220 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:43,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020983216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:43,220 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:43,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020983217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:43,224 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:43,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020983218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:43,322 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:43,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020983319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:43,324 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:43,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020983321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:43,324 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:43,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020983321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:43,328 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:43,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020983326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:43,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:43,491 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212c423d5afe99e459cbce14156594c5436_53789b78ac3c456afdd9ca3b09fff4b9 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212c423d5afe99e459cbce14156594c5436_53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:28:43,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/193f44ac81e64597b9ade013a8ce9325, store: [table=TestAcidGuarantees family=A region=53789b78ac3c456afdd9ca3b09fff4b9] 2024-12-12T16:28:43,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/193f44ac81e64597b9ade013a8ce9325 is 175, key is test_row_0/A:col10/1734020922086/Put/seqid=0 2024-12-12T16:28:43,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742226_1402 (size=31105) 2024-12-12T16:28:43,498 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=195, memsize=55.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/193f44ac81e64597b9ade013a8ce9325 2024-12-12T16:28:43,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/67d8f4cdf54748948812830182f66bf6 is 50, key is test_row_0/B:col10/1734020922086/Put/seqid=0 2024-12-12T16:28:43,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742227_1403 (size=12151) 2024-12-12T16:28:43,508 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/67d8f4cdf54748948812830182f66bf6 2024-12-12T16:28:43,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-12T16:28:43,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/0b2b6b99a3414d16b54f795242a36d74 is 50, key is test_row_0/C:col10/1734020922086/Put/seqid=0 2024-12-12T16:28:43,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742228_1404 (size=12151) 2024-12-12T16:28:43,518 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/0b2b6b99a3414d16b54f795242a36d74 2024-12-12T16:28:43,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/193f44ac81e64597b9ade013a8ce9325 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/193f44ac81e64597b9ade013a8ce9325 2024-12-12T16:28:43,526 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:43,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020983523, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:43,528 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/193f44ac81e64597b9ade013a8ce9325, entries=150, sequenceid=195, filesize=30.4 K 2024-12-12T16:28:43,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/67d8f4cdf54748948812830182f66bf6 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/67d8f4cdf54748948812830182f66bf6 2024-12-12T16:28:43,529 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:43,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020983525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:43,530 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:43,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020983526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:43,534 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/67d8f4cdf54748948812830182f66bf6, entries=150, sequenceid=195, filesize=11.9 K 2024-12-12T16:28:43,534 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:43,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020983531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:43,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/0b2b6b99a3414d16b54f795242a36d74 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/0b2b6b99a3414d16b54f795242a36d74 2024-12-12T16:28:43,539 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/0b2b6b99a3414d16b54f795242a36d74, entries=150, sequenceid=195, filesize=11.9 K 2024-12-12T16:28:43,540 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for 53789b78ac3c456afdd9ca3b09fff4b9 in 475ms, sequenceid=195, compaction requested=true 2024-12-12T16:28:43,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2538): Flush status journal for 53789b78ac3c456afdd9ca3b09fff4b9: 2024-12-12T16:28:43,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 
2024-12-12T16:28:43,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=117 2024-12-12T16:28:43,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4106): Remote procedure done, pid=117 2024-12-12T16:28:43,542 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=117, resume processing ppid=116 2024-12-12T16:28:43,542 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 629 msec 2024-12-12T16:28:43,543 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees in 633 msec 2024-12-12T16:28:43,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:28:43,838 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 53789b78ac3c456afdd9ca3b09fff4b9 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T16:28:43,838 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53789b78ac3c456afdd9ca3b09fff4b9, store=A 2024-12-12T16:28:43,838 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:43,838 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53789b78ac3c456afdd9ca3b09fff4b9, store=B 2024-12-12T16:28:43,838 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:43,838 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53789b78ac3c456afdd9ca3b09fff4b9, store=C 2024-12-12T16:28:43,838 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:43,846 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412123f4697b44707468e8a6cde529c3f2411_53789b78ac3c456afdd9ca3b09fff4b9 is 50, key is test_row_0/A:col10/1734020923837/Put/seqid=0 2024-12-12T16:28:43,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742229_1405 (size=17284) 2024-12-12T16:28:43,873 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:43,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020983866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:43,876 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:43,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020983871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:43,876 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:43,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020983871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:43,881 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:43,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020983873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:43,976 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:43,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020983974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:43,981 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:43,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020983977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:43,981 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:43,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020983977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:43,983 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:43,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020983982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:44,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-12T16:28:44,015 INFO [Thread-1616 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 116 completed 2024-12-12T16:28:44,016 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T16:28:44,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees 2024-12-12T16:28:44,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-12T16:28:44,017 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T16:28:44,018 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T16:28:44,018 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=118, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T16:28:44,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-12T16:28:44,169 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:44,170 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-12T16:28:44,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:44,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. as already flushing 2024-12-12T16:28:44,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:44,170 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:44,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:28:44,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:44,178 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:44,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020984178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:44,186 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:44,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020984182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:44,187 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:44,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020984183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:44,189 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:44,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020984185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:44,226 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:44,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59174 deadline: 1734020984221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:44,254 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:44,258 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412123f4697b44707468e8a6cde529c3f2411_53789b78ac3c456afdd9ca3b09fff4b9 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412123f4697b44707468e8a6cde529c3f2411_53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:28:44,258 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/4a82a2e82fb64ff895b0b5788edbedc1, store: [table=TestAcidGuarantees family=A region=53789b78ac3c456afdd9ca3b09fff4b9] 2024-12-12T16:28:44,259 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/4a82a2e82fb64ff895b0b5788edbedc1 is 175, key is test_row_0/A:col10/1734020923837/Put/seqid=0 2024-12-12T16:28:44,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742230_1406 (size=48389) 2024-12-12T16:28:44,268 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=207, 
memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/4a82a2e82fb64ff895b0b5788edbedc1 2024-12-12T16:28:44,276 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/d8e1bc2ef2414f43a694305ebfcca806 is 50, key is test_row_0/B:col10/1734020923837/Put/seqid=0 2024-12-12T16:28:44,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742231_1407 (size=12151) 2024-12-12T16:28:44,281 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/d8e1bc2ef2414f43a694305ebfcca806 2024-12-12T16:28:44,288 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/0c4c99efb5fa44fb92494cc58a7553c7 is 50, key is test_row_0/C:col10/1734020923837/Put/seqid=0 2024-12-12T16:28:44,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742232_1408 (size=12151) 2024-12-12T16:28:44,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-12T16:28:44,322 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:44,323 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-12T16:28:44,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:44,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. as already flushing 2024-12-12T16:28:44,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:44,323 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:44,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:44,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:44,475 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:44,475 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-12T16:28:44,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:44,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. as already flushing 2024-12-12T16:28:44,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:44,476 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:44,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:44,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:44,482 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:44,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020984481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:44,491 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:44,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020984487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:44,491 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:44,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020984488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:44,497 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:44,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020984492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:44,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-12T16:28:44,628 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:44,628 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-12T16:28:44,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:44,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. as already flushing 2024-12-12T16:28:44,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:44,629 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
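Note on the block of entries above: the repeated RegionTooBusyException warnings are the region server refusing new Mutate calls while region 53789b78ac3c456afdd9ca3b09fff4b9 sits over its 512.0 K memstore blocking limit and a flush is still in flight, which is also why the pid=119 flush procedure keeps reporting "NOT flushing ... as already flushing" / "Unable to complete flush" and gets redispatched by the master. The HBase client normally absorbs these rejections with its own internal retries; the sketch below only makes that handling explicit. It is a minimal illustration, assuming a reachable cluster, the TestAcidGuarantees table, and made-up row/value/backoff numbers that are not taken from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("dummy-value"));
      long backoffMs = 100L; // illustrative starting backoff, not taken from the log
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);   // the client also retries internally before surfacing the exception
          break;            // write accepted once the memstore drops back under the limit
        } catch (RegionTooBusyException busy) {
          // Region is over its memstore blocking limit; give the in-flight flush time to finish.
          Thread.sleep(backoffMs);
          backoffMs *= 2;   // simple exponential backoff
        }
      }
    }
  }
}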
2024-12-12T16:28:44,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:44,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:44,692 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/0c4c99efb5fa44fb92494cc58a7553c7 2024-12-12T16:28:44,696 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/4a82a2e82fb64ff895b0b5788edbedc1 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/4a82a2e82fb64ff895b0b5788edbedc1 2024-12-12T16:28:44,700 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/4a82a2e82fb64ff895b0b5788edbedc1, entries=250, sequenceid=207, filesize=47.3 K 2024-12-12T16:28:44,701 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/d8e1bc2ef2414f43a694305ebfcca806 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/d8e1bc2ef2414f43a694305ebfcca806 2024-12-12T16:28:44,704 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/d8e1bc2ef2414f43a694305ebfcca806, entries=150, sequenceid=207, filesize=11.9 K 2024-12-12T16:28:44,705 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/0c4c99efb5fa44fb92494cc58a7553c7 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/0c4c99efb5fa44fb92494cc58a7553c7 2024-12-12T16:28:44,709 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/0c4c99efb5fa44fb92494cc58a7553c7, entries=150, sequenceid=207, filesize=11.9 K 2024-12-12T16:28:44,710 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 53789b78ac3c456afdd9ca3b09fff4b9 in 871ms, sequenceid=207, compaction requested=true 2024-12-12T16:28:44,710 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 53789b78ac3c456afdd9ca3b09fff4b9: 2024-12-12T16:28:44,710 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
53789b78ac3c456afdd9ca3b09fff4b9:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T16:28:44,710 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:44,710 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T16:28:44,710 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T16:28:44,710 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53789b78ac3c456afdd9ca3b09fff4b9:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T16:28:44,710 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:44,710 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53789b78ac3c456afdd9ca3b09fff4b9:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T16:28:44,711 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:28:44,711 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 46518 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T16:28:44,712 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): 53789b78ac3c456afdd9ca3b09fff4b9/B is initiating minor compaction (all files) 2024-12-12T16:28:44,712 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53789b78ac3c456afdd9ca3b09fff4b9/B in TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 
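Note on the entries above: the completed flush at sequenceid=207 leaves each of the A/B/C stores with four eligible store files, so the flusher marks all three stores for compaction and the ExploringCompactionPolicy selects all four files for a minor compaction. Those requests are system-initiated, but the same work can be asked for through the Admin API. The sketch below is illustrative only; it assumes a reachable cluster and polls the table-level compaction state rather than one specific store.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class RequestCompactionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Request a compaction of family B, analogous to the system request logged above.
      admin.compact(table, Bytes.toBytes("B"));
      // Poll until the region servers report no compaction running for the table.
      while (admin.getCompactionState(table) != CompactionState.NONE) {
        Thread.sleep(500L); // illustrative poll interval
      }
    }
  }
}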
2024-12-12T16:28:44,712 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/fc432ca2852b4154a65e620b655088df, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/85964d63c52f4ee1824c56fdeea9d296, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/67d8f4cdf54748948812830182f66bf6, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/d8e1bc2ef2414f43a694305ebfcca806] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp, totalSize=45.4 K 2024-12-12T16:28:44,712 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 150652 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T16:28:44,712 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): 53789b78ac3c456afdd9ca3b09fff4b9/A is initiating minor compaction (all files) 2024-12-12T16:28:44,712 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53789b78ac3c456afdd9ca3b09fff4b9/A in TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:44,712 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/db48d4d6a9114a38ad91e1a0eac5e1a5, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/030e5db5b59f4c2e993f86dc4d0ac386, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/193f44ac81e64597b9ade013a8ce9325, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/4a82a2e82fb64ff895b0b5788edbedc1] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp, totalSize=147.1 K 2024-12-12T16:28:44,712 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:44,712 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 
files: [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/db48d4d6a9114a38ad91e1a0eac5e1a5, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/030e5db5b59f4c2e993f86dc4d0ac386, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/193f44ac81e64597b9ade013a8ce9325, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/4a82a2e82fb64ff895b0b5788edbedc1] 2024-12-12T16:28:44,713 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting fc432ca2852b4154a65e620b655088df, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1734020921259 2024-12-12T16:28:44,713 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting db48d4d6a9114a38ad91e1a0eac5e1a5, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1734020921259 2024-12-12T16:28:44,713 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 85964d63c52f4ee1824c56fdeea9d296, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=165, earliestPutTs=1734020921370 2024-12-12T16:28:44,713 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 030e5db5b59f4c2e993f86dc4d0ac386, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=165, earliestPutTs=1734020921370 2024-12-12T16:28:44,714 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 67d8f4cdf54748948812830182f66bf6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1734020922084 2024-12-12T16:28:44,714 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 193f44ac81e64597b9ade013a8ce9325, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1734020922084 2024-12-12T16:28:44,714 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting d8e1bc2ef2414f43a694305ebfcca806, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1734020923835 2024-12-12T16:28:44,714 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4a82a2e82fb64ff895b0b5788edbedc1, keycount=250, bloomtype=ROW, size=47.3 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1734020923215 2024-12-12T16:28:44,723 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=53789b78ac3c456afdd9ca3b09fff4b9] 2024-12-12T16:28:44,725 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412126a674b4c65ce48279cc98ae68f116824_53789b78ac3c456afdd9ca3b09fff4b9 store=[table=TestAcidGuarantees family=A region=53789b78ac3c456afdd9ca3b09fff4b9] 2024-12-12T16:28:44,726 INFO 
[RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 53789b78ac3c456afdd9ca3b09fff4b9#B#compaction#346 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:28:44,726 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/0c6eba69084f42119260a04018cb54d1 is 50, key is test_row_0/B:col10/1734020923837/Put/seqid=0 2024-12-12T16:28:44,728 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412126a674b4c65ce48279cc98ae68f116824_53789b78ac3c456afdd9ca3b09fff4b9, store=[table=TestAcidGuarantees family=A region=53789b78ac3c456afdd9ca3b09fff4b9] 2024-12-12T16:28:44,728 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412126a674b4c65ce48279cc98ae68f116824_53789b78ac3c456afdd9ca3b09fff4b9 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=53789b78ac3c456afdd9ca3b09fff4b9] 2024-12-12T16:28:44,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742233_1409 (size=12595) 2024-12-12T16:28:44,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742234_1410 (size=4469) 2024-12-12T16:28:44,760 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 53789b78ac3c456afdd9ca3b09fff4b9#A#compaction#345 average throughput is 0.66 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:28:44,761 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/399a41decdc54956a5e354941ed352a1 is 175, key is test_row_0/A:col10/1734020923837/Put/seqid=0 2024-12-12T16:28:44,765 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/0c6eba69084f42119260a04018cb54d1 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/0c6eba69084f42119260a04018cb54d1 2024-12-12T16:28:44,769 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 53789b78ac3c456afdd9ca3b09fff4b9/B of 53789b78ac3c456afdd9ca3b09fff4b9 into 0c6eba69084f42119260a04018cb54d1(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
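Note on the PressureAwareThroughputController entries above: compaction writes are metered against a 50.00 MB/second aggregate limit, and in this run both compactions finish well under it ("slept 0 time(s)"). That ceiling is region-server configuration, not a table property. The sketch below is a hedged guess at how such a limit is set programmatically; the property names are the ones commonly documented for the pressure-aware compaction controller in 2.x, but treat them as assumptions to verify against your version's hbase-default.xml, and the 50/25 MB values are chosen only to echo the figure in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThroughputConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed property names: upper and lower bounds (bytes/second) between which the
    // controller scales with flush pressure; 50 MB/s matches the "total limit" in the log.
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 50L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 25L * 1024 * 1024);
    System.out.println("higher.bound="
        + conf.getLong("hbase.hstore.compaction.throughput.higher.bound", 0L));
  }
}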
2024-12-12T16:28:44,770 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53789b78ac3c456afdd9ca3b09fff4b9: 2024-12-12T16:28:44,770 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9., storeName=53789b78ac3c456afdd9ca3b09fff4b9/B, priority=12, startTime=1734020924710; duration=0sec 2024-12-12T16:28:44,770 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:28:44,770 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53789b78ac3c456afdd9ca3b09fff4b9:B 2024-12-12T16:28:44,770 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T16:28:44,771 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 46518 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T16:28:44,771 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): 53789b78ac3c456afdd9ca3b09fff4b9/C is initiating minor compaction (all files) 2024-12-12T16:28:44,771 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53789b78ac3c456afdd9ca3b09fff4b9/C in TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:44,771 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/1b2c720731834391b0486f199e1fb468, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/565908d4e6aa4f3a8be622bc2982955d, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/0b2b6b99a3414d16b54f795242a36d74, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/0c4c99efb5fa44fb92494cc58a7553c7] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp, totalSize=45.4 K 2024-12-12T16:28:44,772 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 1b2c720731834391b0486f199e1fb468, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1734020921259 2024-12-12T16:28:44,773 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 565908d4e6aa4f3a8be622bc2982955d, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=165, earliestPutTs=1734020921370 2024-12-12T16:28:44,773 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 0b2b6b99a3414d16b54f795242a36d74, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=195, earliestPutTs=1734020922084 2024-12-12T16:28:44,774 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 0c4c99efb5fa44fb92494cc58a7553c7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1734020923835 2024-12-12T16:28:44,781 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:44,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742235_1411 (size=31549) 2024-12-12T16:28:44,784 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-12T16:28:44,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:44,784 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2837): Flushing 53789b78ac3c456afdd9ca3b09fff4b9 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-12T16:28:44,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53789b78ac3c456afdd9ca3b09fff4b9, store=A 2024-12-12T16:28:44,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:44,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53789b78ac3c456afdd9ca3b09fff4b9, store=B 2024-12-12T16:28:44,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:44,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53789b78ac3c456afdd9ca3b09fff4b9, store=C 2024-12-12T16:28:44,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:44,795 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 53789b78ac3c456afdd9ca3b09fff4b9#C#compaction#347 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:28:44,796 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/caaf086dbe8a44a1935d0e1ab526539d is 50, key is test_row_0/C:col10/1734020923837/Put/seqid=0 2024-12-12T16:28:44,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212820dc023975a4f44b8d64025fb02caf5_53789b78ac3c456afdd9ca3b09fff4b9 is 50, key is test_row_0/A:col10/1734020923872/Put/seqid=0 2024-12-12T16:28:44,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742236_1412 (size=12595) 2024-12-12T16:28:44,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742237_1413 (size=12304) 2024-12-12T16:28:44,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:28:44,987 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. as already flushing 2024-12-12T16:28:45,010 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:45,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020985003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:45,011 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:45,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020985004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:45,011 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:45,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020985005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:45,011 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:45,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020985006, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:45,112 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:45,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020985112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:45,113 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:45,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020985112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:45,116 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:45,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020985113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:45,117 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:45,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020985113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:45,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-12T16:28:45,187 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/399a41decdc54956a5e354941ed352a1 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/399a41decdc54956a5e354941ed352a1 2024-12-12T16:28:45,192 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 53789b78ac3c456afdd9ca3b09fff4b9/A of 53789b78ac3c456afdd9ca3b09fff4b9 into 399a41decdc54956a5e354941ed352a1(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
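The repeated RegionTooBusyException entries above are raised by HRegion.checkResources while the region's memstore is over its blocking limit; that limit is ordinarily hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. Below is a minimal sketch of that arithmetic, assuming illustrative values; the class name and the two settings shown are assumptions, not values read from this test run, which only reports the resulting 512.0 K limit.

```java
// Hypothetical sketch: the flush size and multiplier below are illustrative,
// not the values this test run actually used.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Flush each memstore once it reaches 128 KB (the stock default is 128 MB).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // Block new writes once the memstore grows past flush.size * multiplier.
    conf.setLong("hbase.hregion.memstore.block.multiplier", 4L);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
        * conf.getLong("hbase.hregion.memstore.block.multiplier", 0L);
    // With these illustrative values the region rejects writes above 524288
    // bytes, i.e. the "Over memstore limit=512.0 K" seen in the log records.
    System.out.println("blocking memstore size = " + blockingLimit + " bytes");
  }
}
```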
2024-12-12T16:28:45,193 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53789b78ac3c456afdd9ca3b09fff4b9: 2024-12-12T16:28:45,193 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9., storeName=53789b78ac3c456afdd9ca3b09fff4b9/A, priority=12, startTime=1734020924710; duration=0sec 2024-12-12T16:28:45,193 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:45,193 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53789b78ac3c456afdd9ca3b09fff4b9:A 2024-12-12T16:28:45,205 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/caaf086dbe8a44a1935d0e1ab526539d as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/caaf086dbe8a44a1935d0e1ab526539d 2024-12-12T16:28:45,209 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 53789b78ac3c456afdd9ca3b09fff4b9/C of 53789b78ac3c456afdd9ca3b09fff4b9 into caaf086dbe8a44a1935d0e1ab526539d(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:28:45,209 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53789b78ac3c456afdd9ca3b09fff4b9: 2024-12-12T16:28:45,209 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9., storeName=53789b78ac3c456afdd9ca3b09fff4b9/C, priority=12, startTime=1734020924710; duration=0sec 2024-12-12T16:28:45,209 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:45,209 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53789b78ac3c456afdd9ca3b09fff4b9:C 2024-12-12T16:28:45,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:45,212 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212820dc023975a4f44b8d64025fb02caf5_53789b78ac3c456afdd9ca3b09fff4b9 to 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212820dc023975a4f44b8d64025fb02caf5_53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:28:45,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/71003160ca254b8e83d6d8d111294310, store: [table=TestAcidGuarantees family=A region=53789b78ac3c456afdd9ca3b09fff4b9] 2024-12-12T16:28:45,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/71003160ca254b8e83d6d8d111294310 is 175, key is test_row_0/A:col10/1734020923872/Put/seqid=0 2024-12-12T16:28:45,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742238_1414 (size=31105) 2024-12-12T16:28:45,315 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:45,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020985314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:45,316 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:45,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020985315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:45,320 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:45,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020985317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:45,321 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:45,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020985318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:45,618 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:45,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020985617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:45,619 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=232, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/71003160ca254b8e83d6d8d111294310 2024-12-12T16:28:45,622 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:45,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020985619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:45,623 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:45,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020985623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:45,626 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:45,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020985623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:45,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/1c03b5a8441a4160ae4f3d85362c1ba7 is 50, key is test_row_0/B:col10/1734020923872/Put/seqid=0 2024-12-12T16:28:45,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742239_1415 (size=12151) 2024-12-12T16:28:46,031 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=232 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/1c03b5a8441a4160ae4f3d85362c1ba7 2024-12-12T16:28:46,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/a3e8b6e225144619942020afd2744e1d is 50, key is test_row_0/C:col10/1734020923872/Put/seqid=0 2024-12-12T16:28:46,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742240_1416 (size=12151) 2024-12-12T16:28:46,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-12T16:28:46,124 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:46,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020986120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:46,127 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:46,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020986123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:46,131 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:46,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020986126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:46,132 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:46,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020986129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:46,240 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:46,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59174 deadline: 1734020986237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:46,241 DEBUG [Thread-1606 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4155 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9., hostname=4f6a4780a2f6,41933,1734020809476, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T16:28:46,448 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=232 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/a3e8b6e225144619942020afd2744e1d 2024-12-12T16:28:46,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/71003160ca254b8e83d6d8d111294310 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/71003160ca254b8e83d6d8d111294310 2024-12-12T16:28:46,465 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/71003160ca254b8e83d6d8d111294310, entries=150, sequenceid=232, filesize=30.4 K 2024-12-12T16:28:46,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/1c03b5a8441a4160ae4f3d85362c1ba7 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/1c03b5a8441a4160ae4f3d85362c1ba7 2024-12-12T16:28:46,470 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/1c03b5a8441a4160ae4f3d85362c1ba7, entries=150, sequenceid=232, filesize=11.9 K 2024-12-12T16:28:46,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/a3e8b6e225144619942020afd2744e1d as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/a3e8b6e225144619942020afd2744e1d 2024-12-12T16:28:46,474 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/a3e8b6e225144619942020afd2744e1d, entries=150, sequenceid=232, filesize=11.9 K 2024-12-12T16:28:46,475 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 53789b78ac3c456afdd9ca3b09fff4b9 in 1691ms, sequenceid=232, compaction requested=false 2024-12-12T16:28:46,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2538): Flush status journal for 53789b78ac3c456afdd9ca3b09fff4b9: 2024-12-12T16:28:46,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 
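The client-side RpcRetryingCallerImpl record earlier in this section (tries=6, retries=16) shows how these RegionTooBusyException responses reach the writer: HTable.put blocks inside callWithRetries and only rethrows once the retry budget is exhausted. Below is a minimal sketch of such a writer, assuming a plain Connection/Table client; the table, family, qualifier and row names are copied from the log records above, while the pause value and the cell payload are illustrative assumptions.

```java
// Hypothetical client sketch; retries=16 matches the RpcRetryingCallerImpl
// record above, the pause value and payload are assumptions.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Retry budget for recoverable server responses such as RegionTooBusyException.
    conf.setInt("hbase.client.retries.number", 16);
    // Base backoff between attempts, in milliseconds (illustrative value).
    conf.setLong("hbase.client.pause", 100L);
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_2"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // Blocks inside RpcRetryingCallerImpl.callWithRetries; the
      // RegionTooBusyException only propagates here after retries run out.
      table.put(put);
    }
  }
}
```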
2024-12-12T16:28:46,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=119 2024-12-12T16:28:46,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4106): Remote procedure done, pid=119 2024-12-12T16:28:46,478 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=118 2024-12-12T16:28:46,479 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.4590 sec 2024-12-12T16:28:46,480 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees in 2.4630 sec 2024-12-12T16:28:47,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:28:47,133 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 53789b78ac3c456afdd9ca3b09fff4b9 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-12T16:28:47,133 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53789b78ac3c456afdd9ca3b09fff4b9, store=A 2024-12-12T16:28:47,133 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:47,133 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53789b78ac3c456afdd9ca3b09fff4b9, store=B 2024-12-12T16:28:47,133 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:47,133 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53789b78ac3c456afdd9ca3b09fff4b9, store=C 2024-12-12T16:28:47,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:47,141 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212189b4239423e406d966a6166e9b0eded_53789b78ac3c456afdd9ca3b09fff4b9 is 50, key is test_row_0/A:col10/1734020927132/Put/seqid=0 2024-12-12T16:28:47,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742241_1417 (size=14794) 2024-12-12T16:28:47,146 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:47,150 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212189b4239423e406d966a6166e9b0eded_53789b78ac3c456afdd9ca3b09fff4b9 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212189b4239423e406d966a6166e9b0eded_53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:28:47,151 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/e4e0128eac284db1bf4dce0e30db8c9b, store: [table=TestAcidGuarantees family=A region=53789b78ac3c456afdd9ca3b09fff4b9] 2024-12-12T16:28:47,151 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/e4e0128eac284db1bf4dce0e30db8c9b is 175, key is test_row_0/A:col10/1734020927132/Put/seqid=0 2024-12-12T16:28:47,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742242_1418 (size=39749) 2024-12-12T16:28:47,158 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=247, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/e4e0128eac284db1bf4dce0e30db8c9b 2024-12-12T16:28:47,166 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/b55c4daa05a34a3183061b5c5b167c76 is 50, key is test_row_0/B:col10/1734020927132/Put/seqid=0 2024-12-12T16:28:47,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742243_1419 (size=12151) 2024-12-12T16:28:47,169 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/b55c4daa05a34a3183061b5c5b167c76 2024-12-12T16:28:47,174 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:47,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020987165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:47,175 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:47,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020987165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:47,177 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/fb06b86af1e044709c94661438eb4f0d is 50, key is test_row_0/C:col10/1734020927132/Put/seqid=0 2024-12-12T16:28:47,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742244_1420 (size=12151) 2024-12-12T16:28:47,183 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:47,183 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:47,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020987174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:47,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020987174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:47,281 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:47,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020987275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:47,281 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:47,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020987276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:47,289 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:47,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020987284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:47,290 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:47,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020987284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:47,487 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:47,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020987483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:47,487 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:47,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020987483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:47,495 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:47,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020987491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:47,495 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:47,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020987491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:47,581 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/fb06b86af1e044709c94661438eb4f0d 2024-12-12T16:28:47,585 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/e4e0128eac284db1bf4dce0e30db8c9b as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/e4e0128eac284db1bf4dce0e30db8c9b 2024-12-12T16:28:47,588 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/e4e0128eac284db1bf4dce0e30db8c9b, entries=200, sequenceid=247, filesize=38.8 K 2024-12-12T16:28:47,589 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/b55c4daa05a34a3183061b5c5b167c76 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/b55c4daa05a34a3183061b5c5b167c76 2024-12-12T16:28:47,593 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/b55c4daa05a34a3183061b5c5b167c76, entries=150, sequenceid=247, filesize=11.9 K 2024-12-12T16:28:47,594 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/fb06b86af1e044709c94661438eb4f0d as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/fb06b86af1e044709c94661438eb4f0d 2024-12-12T16:28:47,597 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/fb06b86af1e044709c94661438eb4f0d, entries=150, sequenceid=247, filesize=11.9 K 2024-12-12T16:28:47,597 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 53789b78ac3c456afdd9ca3b09fff4b9 in 464ms, sequenceid=247, compaction requested=true 2024-12-12T16:28:47,597 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 53789b78ac3c456afdd9ca3b09fff4b9: 2024-12-12T16:28:47,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53789b78ac3c456afdd9ca3b09fff4b9:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T16:28:47,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:47,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53789b78ac3c456afdd9ca3b09fff4b9:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T16:28:47,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:47,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53789b78ac3c456afdd9ca3b09fff4b9:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T16:28:47,598 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:28:47,598 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:28:47,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:28:47,599 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:28:47,599 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): 53789b78ac3c456afdd9ca3b09fff4b9/B is initiating minor compaction (all files) 2024-12-12T16:28:47,599 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53789b78ac3c456afdd9ca3b09fff4b9/B in TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 
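The ExploringCompactionPolicy(116) line above reports that all three B-family store files (36897 bytes in total) pass its ratio test and are selected for a minor compaction. A simplified, hypothetical Java sketch of that "in ratio" check follows; it is not the HBase source, and the real policy additionally enforces hbase.hstore.compaction.min/max and prefers the candidate with the most files, then the smallest total size:

    import java.util.List;

    // Simplified sketch of the check behind "1 permutations with 1 in ratio":
    // every file in a candidate set must be no larger than ratio times the
    // combined size of the other files (ratio ~ hbase.hstore.compaction.ratio,
    // default 1.2). Sizes below are the three B-family store files from the log.
    public class RatioCheckSketch {
        static boolean filesInRatio(List<Long> fileSizes, double ratio) {
            long total = fileSizes.stream().mapToLong(Long::longValue).sum();
            for (long size : fileSizes) {
                if (size > ratio * (total - size)) {
                    return false;   // one file dominates the set; reject this candidate
                }
            }
            return true;
        }

        public static void main(String[] args) {
            // 12595 + 12151 + 12151 = 36897 bytes, matching "selected 3 files of size 36897"
            System.out.println(filesInRatio(List.of(12595L, 12151L, 12151L), 1.2)); // true
        }
    }
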
2024-12-12T16:28:47,599 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/0c6eba69084f42119260a04018cb54d1, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/1c03b5a8441a4160ae4f3d85362c1ba7, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/b55c4daa05a34a3183061b5c5b167c76] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp, totalSize=36.0 K 2024-12-12T16:28:47,599 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102403 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:28:47,599 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): 53789b78ac3c456afdd9ca3b09fff4b9/A is initiating minor compaction (all files) 2024-12-12T16:28:47,599 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53789b78ac3c456afdd9ca3b09fff4b9/A in TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:47,600 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/399a41decdc54956a5e354941ed352a1, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/71003160ca254b8e83d6d8d111294310, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/e4e0128eac284db1bf4dce0e30db8c9b] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp, totalSize=100.0 K 2024-12-12T16:28:47,600 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:47,600 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 
files: [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/399a41decdc54956a5e354941ed352a1, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/71003160ca254b8e83d6d8d111294310, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/e4e0128eac284db1bf4dce0e30db8c9b] 2024-12-12T16:28:47,600 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 0c6eba69084f42119260a04018cb54d1, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1734020923835 2024-12-12T16:28:47,600 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 399a41decdc54956a5e354941ed352a1, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1734020923835 2024-12-12T16:28:47,600 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 71003160ca254b8e83d6d8d111294310, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1734020923865 2024-12-12T16:28:47,600 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 1c03b5a8441a4160ae4f3d85362c1ba7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1734020923865 2024-12-12T16:28:47,601 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting b55c4daa05a34a3183061b5c5b167c76, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1734020924997 2024-12-12T16:28:47,601 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting e4e0128eac284db1bf4dce0e30db8c9b, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1734020924997 2024-12-12T16:28:47,607 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=53789b78ac3c456afdd9ca3b09fff4b9] 2024-12-12T16:28:47,609 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 53789b78ac3c456afdd9ca3b09fff4b9#B#compaction#354 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:28:47,609 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412122d63c8e8a84042f9907814e3dd6a5340_53789b78ac3c456afdd9ca3b09fff4b9 store=[table=TestAcidGuarantees family=A region=53789b78ac3c456afdd9ca3b09fff4b9] 2024-12-12T16:28:47,609 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/b9b2c92a645f4ca69447cbf4e4dd3bd9 is 50, key is test_row_0/B:col10/1734020927132/Put/seqid=0 2024-12-12T16:28:47,610 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412122d63c8e8a84042f9907814e3dd6a5340_53789b78ac3c456afdd9ca3b09fff4b9, store=[table=TestAcidGuarantees family=A region=53789b78ac3c456afdd9ca3b09fff4b9] 2024-12-12T16:28:47,610 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412122d63c8e8a84042f9907814e3dd6a5340_53789b78ac3c456afdd9ca3b09fff4b9 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=53789b78ac3c456afdd9ca3b09fff4b9] 2024-12-12T16:28:47,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742245_1421 (size=12697) 2024-12-12T16:28:47,635 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/b9b2c92a645f4ca69447cbf4e4dd3bd9 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/b9b2c92a645f4ca69447cbf4e4dd3bd9 2024-12-12T16:28:47,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742246_1422 (size=4469) 2024-12-12T16:28:47,641 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 53789b78ac3c456afdd9ca3b09fff4b9/B of 53789b78ac3c456afdd9ca3b09fff4b9 into b9b2c92a645f4ca69447cbf4e4dd3bd9(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:28:47,641 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53789b78ac3c456afdd9ca3b09fff4b9: 2024-12-12T16:28:47,641 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9., storeName=53789b78ac3c456afdd9ca3b09fff4b9/B, priority=13, startTime=1734020927598; duration=0sec 2024-12-12T16:28:47,641 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 53789b78ac3c456afdd9ca3b09fff4b9#A#compaction#355 average throughput is 0.72 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:28:47,641 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:28:47,641 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53789b78ac3c456afdd9ca3b09fff4b9:B 2024-12-12T16:28:47,641 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:28:47,642 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/d2e7d9cb27c14678bf8f14bdcaf318ad is 175, key is test_row_0/A:col10/1734020927132/Put/seqid=0 2024-12-12T16:28:47,644 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:28:47,644 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): 53789b78ac3c456afdd9ca3b09fff4b9/C is initiating minor compaction (all files) 2024-12-12T16:28:47,644 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53789b78ac3c456afdd9ca3b09fff4b9/C in TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:47,644 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/caaf086dbe8a44a1935d0e1ab526539d, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/a3e8b6e225144619942020afd2744e1d, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/fb06b86af1e044709c94661438eb4f0d] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp, totalSize=36.0 K 2024-12-12T16:28:47,644 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting caaf086dbe8a44a1935d0e1ab526539d, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1734020923835 2024-12-12T16:28:47,645 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting a3e8b6e225144619942020afd2744e1d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1734020923865 2024-12-12T16:28:47,645 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting fb06b86af1e044709c94661438eb4f0d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1734020924997 2024-12-12T16:28:47,666 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 53789b78ac3c456afdd9ca3b09fff4b9#C#compaction#356 
average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:28:47,666 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/de8389de099246c482e4f612b3f2cefd is 50, key is test_row_0/C:col10/1734020927132/Put/seqid=0 2024-12-12T16:28:47,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742247_1423 (size=31651) 2024-12-12T16:28:47,691 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/d2e7d9cb27c14678bf8f14bdcaf318ad as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/d2e7d9cb27c14678bf8f14bdcaf318ad 2024-12-12T16:28:47,696 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 53789b78ac3c456afdd9ca3b09fff4b9/A of 53789b78ac3c456afdd9ca3b09fff4b9 into d2e7d9cb27c14678bf8f14bdcaf318ad(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:28:47,696 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53789b78ac3c456afdd9ca3b09fff4b9: 2024-12-12T16:28:47,697 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9., storeName=53789b78ac3c456afdd9ca3b09fff4b9/A, priority=13, startTime=1734020927597; duration=0sec 2024-12-12T16:28:47,697 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:47,697 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53789b78ac3c456afdd9ca3b09fff4b9:A 2024-12-12T16:28:47,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742248_1424 (size=12697) 2024-12-12T16:28:47,707 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/de8389de099246c482e4f612b3f2cefd as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/de8389de099246c482e4f612b3f2cefd 2024-12-12T16:28:47,712 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 53789b78ac3c456afdd9ca3b09fff4b9/C of 53789b78ac3c456afdd9ca3b09fff4b9 into de8389de099246c482e4f612b3f2cefd(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
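The recurring RegionTooBusyException: Over memstore limit=512.0 K entries above come from HRegion.checkResources(), which rejects writes once a region's memstore passes its blocking size (the configured memstore flush size times hbase.hregion.memstore.block.multiplier; this test apparently runs with a deliberately small flush size, giving the 512 K ceiling). The HBase client treats the exception as retryable and backs off before resending. A minimal, hypothetical client-side sketch of that pattern, with illustrative values that are not taken from the test:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RetryingPutSketch {
        public static void main(String[] args) throws IOException {
            Configuration conf = HBaseConfiguration.create();
            // Give the client more room to ride out RegionTooBusyException while
            // the flushes/compactions above drain the memstore (illustrative values).
            conf.setInt("hbase.client.retries.number", 15);
            conf.setLong("hbase.client.pause", 100);            // ms between retries
            conf.setInt("hbase.client.operation.timeout", 120000);

            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                table.put(put);   // retried internally if the region reports "too busy"
            }
        }
    }
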
2024-12-12T16:28:47,712 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53789b78ac3c456afdd9ca3b09fff4b9: 2024-12-12T16:28:47,712 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9., storeName=53789b78ac3c456afdd9ca3b09fff4b9/C, priority=13, startTime=1734020927598; duration=0sec 2024-12-12T16:28:47,712 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:47,712 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53789b78ac3c456afdd9ca3b09fff4b9:C 2024-12-12T16:28:47,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:28:47,793 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 53789b78ac3c456afdd9ca3b09fff4b9 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-12T16:28:47,794 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53789b78ac3c456afdd9ca3b09fff4b9, store=A 2024-12-12T16:28:47,794 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:47,794 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53789b78ac3c456afdd9ca3b09fff4b9, store=B 2024-12-12T16:28:47,794 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:47,794 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53789b78ac3c456afdd9ca3b09fff4b9, store=C 2024-12-12T16:28:47,794 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:47,803 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212a0d54180c25b4bf281e45eccad9217d8_53789b78ac3c456afdd9ca3b09fff4b9 is 50, key is test_row_0/A:col10/1734020927173/Put/seqid=0 2024-12-12T16:28:47,807 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:47,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020987804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:47,808 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-12T16:28:47,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742249_1425 (size=17534) 2024-12-12T16:28:47,811 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:47,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020987805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:47,812 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:47,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020987806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:47,813 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:47,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020987807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:47,912 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:47,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020987909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:47,915 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:47,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020987913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:47,915 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:47,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020987913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:47,917 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:47,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020987914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:48,116 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:48,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020988114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:48,121 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:48,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020988117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:48,121 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:48,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020988117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:48,121 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:48,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020988119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:48,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-12T16:28:48,122 INFO [Thread-1616 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 118 completed 2024-12-12T16:28:48,123 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T16:28:48,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=120, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees 2024-12-12T16:28:48,124 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=120, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T16:28:48,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-12T16:28:48,125 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=120, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T16:28:48,125 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T16:28:48,210 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:48,214 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212a0d54180c25b4bf281e45eccad9217d8_53789b78ac3c456afdd9ca3b09fff4b9 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212a0d54180c25b4bf281e45eccad9217d8_53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:28:48,215 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store 
file: hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/7782498ebc454fb08544da39d5ba9ca6, store: [table=TestAcidGuarantees family=A region=53789b78ac3c456afdd9ca3b09fff4b9] 2024-12-12T16:28:48,215 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/7782498ebc454fb08544da39d5ba9ca6 is 175, key is test_row_0/A:col10/1734020927173/Put/seqid=0 2024-12-12T16:28:48,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742250_1426 (size=48639) 2024-12-12T16:28:48,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-12T16:28:48,277 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:48,277 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-12T16:28:48,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:48,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. as already flushing 2024-12-12T16:28:48,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:48,278 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:28:48,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:48,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:48,422 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:48,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020988418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:48,426 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:48,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-12T16:28:48,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020988423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:48,427 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:48,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020988423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:48,427 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:48,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020988424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:48,430 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:48,431 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-12T16:28:48,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:48,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. as already flushing 2024-12-12T16:28:48,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:48,431 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:48,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:48,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:48,583 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:48,583 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-12T16:28:48,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:48,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. as already flushing 2024-12-12T16:28:48,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:48,584 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:48,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:48,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:28:48,620 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=275, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/7782498ebc454fb08544da39d5ba9ca6 2024-12-12T16:28:48,626 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/8a5bb42854d24538a46b3d1f45ce72b2 is 50, key is test_row_0/B:col10/1734020927173/Put/seqid=0 2024-12-12T16:28:48,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742251_1427 (size=12301) 2024-12-12T16:28:48,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-12T16:28:48,736 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:48,736 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-12T16:28:48,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:48,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. as already flushing 2024-12-12T16:28:48,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:48,737 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:28:48,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:48,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:48,889 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:48,889 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-12T16:28:48,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:48,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. as already flushing 2024-12-12T16:28:48,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:48,890 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:48,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:48,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:48,930 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:48,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020988928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:48,934 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:48,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020988931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:48,934 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:48,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020988931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:48,935 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:48,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020988931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:49,031 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=275 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/8a5bb42854d24538a46b3d1f45ce72b2 2024-12-12T16:28:49,043 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:49,044 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-12T16:28:49,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:49,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. as already flushing 2024-12-12T16:28:49,044 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/b1fd7403e10146729f9981d7dde0b9bf is 50, key is test_row_0/C:col10/1734020927173/Put/seqid=0 2024-12-12T16:28:49,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:49,044 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:49,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:49,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:49,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742252_1428 (size=12301) 2024-12-12T16:28:49,196 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:49,197 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-12T16:28:49,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:49,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. as already flushing 2024-12-12T16:28:49,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:49,197 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:49,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:49,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:49,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-12T16:28:49,349 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:49,349 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-12T16:28:49,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:49,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. as already flushing 2024-12-12T16:28:49,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:49,350 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:49,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:49,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:28:49,450 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=275 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/b1fd7403e10146729f9981d7dde0b9bf 2024-12-12T16:28:49,454 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/7782498ebc454fb08544da39d5ba9ca6 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/7782498ebc454fb08544da39d5ba9ca6 2024-12-12T16:28:49,458 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/7782498ebc454fb08544da39d5ba9ca6, entries=250, sequenceid=275, filesize=47.5 K 2024-12-12T16:28:49,459 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/8a5bb42854d24538a46b3d1f45ce72b2 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/8a5bb42854d24538a46b3d1f45ce72b2 2024-12-12T16:28:49,462 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/8a5bb42854d24538a46b3d1f45ce72b2, entries=150, 
sequenceid=275, filesize=12.0 K 2024-12-12T16:28:49,463 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/b1fd7403e10146729f9981d7dde0b9bf as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/b1fd7403e10146729f9981d7dde0b9bf 2024-12-12T16:28:49,467 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/b1fd7403e10146729f9981d7dde0b9bf, entries=150, sequenceid=275, filesize=12.0 K 2024-12-12T16:28:49,468 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 53789b78ac3c456afdd9ca3b09fff4b9 in 1674ms, sequenceid=275, compaction requested=false 2024-12-12T16:28:49,468 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 53789b78ac3c456afdd9ca3b09fff4b9: 2024-12-12T16:28:49,502 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:49,502 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-12T16:28:49,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 
2024-12-12T16:28:49,503 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2837): Flushing 53789b78ac3c456afdd9ca3b09fff4b9 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T16:28:49,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53789b78ac3c456afdd9ca3b09fff4b9, store=A 2024-12-12T16:28:49,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:49,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53789b78ac3c456afdd9ca3b09fff4b9, store=B 2024-12-12T16:28:49,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:49,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53789b78ac3c456afdd9ca3b09fff4b9, store=C 2024-12-12T16:28:49,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:49,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412123e22b04d19ae4f33918c6532282d18f1_53789b78ac3c456afdd9ca3b09fff4b9 is 50, key is test_row_0/A:col10/1734020927806/Put/seqid=0 2024-12-12T16:28:49,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742253_1429 (size=12454) 2024-12-12T16:28:49,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:49,921 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412123e22b04d19ae4f33918c6532282d18f1_53789b78ac3c456afdd9ca3b09fff4b9 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412123e22b04d19ae4f33918c6532282d18f1_53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:28:49,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/dbc4245a4c0c4f45b76921093032ad1a, store: [table=TestAcidGuarantees family=A region=53789b78ac3c456afdd9ca3b09fff4b9] 2024-12-12T16:28:49,923 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/dbc4245a4c0c4f45b76921093032ad1a is 175, key is test_row_0/A:col10/1734020927806/Put/seqid=0 2024-12-12T16:28:49,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742254_1430 (size=31255) 2024-12-12T16:28:49,936 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. as already flushing 2024-12-12T16:28:49,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:28:50,007 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:50,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020990001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:50,008 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:50,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020990002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:50,010 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:50,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020990007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:50,013 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:50,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020990007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:50,112 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:50,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020990109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:50,112 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:50,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020990109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:50,113 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:50,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020990111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:50,117 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:50,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020990114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:50,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-12T16:28:50,276 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:50,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59174 deadline: 1734020990269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:50,277 DEBUG [Thread-1606 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8190 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9., hostname=4f6a4780a2f6,41933,1734020809476, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T16:28:50,317 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:50,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020990314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:50,318 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:50,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020990314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:50,318 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:50,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020990314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:50,322 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:50,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020990318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:50,328 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=286, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/dbc4245a4c0c4f45b76921093032ad1a 2024-12-12T16:28:50,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/bb33c33095af4c5f9af84e21d9ba3993 is 50, key is test_row_0/B:col10/1734020927806/Put/seqid=0 2024-12-12T16:28:50,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742255_1431 (size=12301) 2024-12-12T16:28:50,622 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:50,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020990619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:50,625 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:50,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020990619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:50,626 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:50,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020990620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:50,627 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:50,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020990623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:50,744 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=286 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/bb33c33095af4c5f9af84e21d9ba3993 2024-12-12T16:28:50,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/470f4864bdd047aaa53d66ce2a1d39e3 is 50, key is test_row_0/C:col10/1734020927806/Put/seqid=0 2024-12-12T16:28:50,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742256_1432 (size=12301) 2024-12-12T16:28:50,756 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=286 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/470f4864bdd047aaa53d66ce2a1d39e3 2024-12-12T16:28:50,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/dbc4245a4c0c4f45b76921093032ad1a as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/dbc4245a4c0c4f45b76921093032ad1a 2024-12-12T16:28:50,763 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/dbc4245a4c0c4f45b76921093032ad1a, entries=150, sequenceid=286, filesize=30.5 K 2024-12-12T16:28:50,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/bb33c33095af4c5f9af84e21d9ba3993 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/bb33c33095af4c5f9af84e21d9ba3993 2024-12-12T16:28:50,777 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/bb33c33095af4c5f9af84e21d9ba3993, entries=150, sequenceid=286, filesize=12.0 K 2024-12-12T16:28:50,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/470f4864bdd047aaa53d66ce2a1d39e3 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/470f4864bdd047aaa53d66ce2a1d39e3 2024-12-12T16:28:50,782 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/470f4864bdd047aaa53d66ce2a1d39e3, entries=150, sequenceid=286, filesize=12.0 K 2024-12-12T16:28:50,783 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 53789b78ac3c456afdd9ca3b09fff4b9 in 1280ms, sequenceid=286, compaction requested=true 2024-12-12T16:28:50,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2538): Flush status journal for 53789b78ac3c456afdd9ca3b09fff4b9: 2024-12-12T16:28:50,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 
2024-12-12T16:28:50,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=121 2024-12-12T16:28:50,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4106): Remote procedure done, pid=121 2024-12-12T16:28:50,785 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=121, resume processing ppid=120 2024-12-12T16:28:50,785 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, ppid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6590 sec 2024-12-12T16:28:50,788 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees in 2.6640 sec 2024-12-12T16:28:51,135 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 53789b78ac3c456afdd9ca3b09fff4b9 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-12T16:28:51,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:28:51,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53789b78ac3c456afdd9ca3b09fff4b9, store=A 2024-12-12T16:28:51,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:51,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53789b78ac3c456afdd9ca3b09fff4b9, store=B 2024-12-12T16:28:51,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:51,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53789b78ac3c456afdd9ca3b09fff4b9, store=C 2024-12-12T16:28:51,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:51,144 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212c7145ba963ed4914a80f7c27c5b8700b_53789b78ac3c456afdd9ca3b09fff4b9 is 50, key is test_row_0/A:col10/1734020931134/Put/seqid=0 2024-12-12T16:28:51,146 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:51,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020991144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:51,153 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:51,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020991145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:51,153 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:51,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020991146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:51,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742257_1433 (size=14994) 2024-12-12T16:28:51,153 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:51,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020991146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:51,255 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:51,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020991248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:51,258 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:51,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020991254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:51,259 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:51,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020991254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:51,259 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:51,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020991254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:51,269 DEBUG [Thread-1623 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x68035c67 to 127.0.0.1:52684 2024-12-12T16:28:51,269 DEBUG [Thread-1623 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:28:51,271 DEBUG [Thread-1619 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x75e4d3d0 to 127.0.0.1:52684 2024-12-12T16:28:51,271 DEBUG [Thread-1619 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:28:51,275 DEBUG [Thread-1621 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2b308f62 to 127.0.0.1:52684 2024-12-12T16:28:51,275 DEBUG [Thread-1621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:28:51,276 DEBUG [Thread-1625 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3eab689a to 127.0.0.1:52684 2024-12-12T16:28:51,276 DEBUG [Thread-1625 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:28:51,277 DEBUG [Thread-1617 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x683f8469 to 127.0.0.1:52684 2024-12-12T16:28:51,277 DEBUG [Thread-1617 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:28:51,457 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:51,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020991457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:51,460 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:51,460 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:51,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020991459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:51,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020991460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:51,461 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:51,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020991461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:51,554 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:51,557 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212c7145ba963ed4914a80f7c27c5b8700b_53789b78ac3c456afdd9ca3b09fff4b9 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212c7145ba963ed4914a80f7c27c5b8700b_53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:28:51,558 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/f26d60cbfe154b9fb20d419aebf0471d, store: [table=TestAcidGuarantees family=A region=53789b78ac3c456afdd9ca3b09fff4b9] 2024-12-12T16:28:51,558 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/f26d60cbfe154b9fb20d419aebf0471d is 175, key is test_row_0/A:col10/1734020931134/Put/seqid=0 2024-12-12T16:28:51,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742258_1434 (size=39949) 2024-12-12T16:28:51,758 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:51,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020991758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:51,761 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:51,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020991761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:51,762 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:51,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020991762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:51,763 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:51,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020991763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:51,962 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=314, memsize=55.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/f26d60cbfe154b9fb20d419aebf0471d 2024-12-12T16:28:51,968 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/cdd2ce54257e413ca6ac681c40e2e986 is 50, key is test_row_0/B:col10/1734020931134/Put/seqid=0 2024-12-12T16:28:51,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742259_1435 (size=12301) 2024-12-12T16:28:52,190 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/4f6a4780a2f6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/571dc34ce88f4262b8c75d180ab4eb0d, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/ba80a12c0b8f49bdbe5f3e72912b37d3, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/a8af27310a8f44c38729f3c8101c73d7, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/f3d834f71cc840ae813eaf8a8db42463, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/607a440527b643baa82714a4bca42611, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/f7a3d20c94c6464ea2c065eb29dc3d12, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/52382815b9b2473ea0d9f17dcf23b2a8, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/5600e7a7640a45f8bb186b02f8511eea, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/4fb755e949244ba18793312d823cefa4, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/db48d4d6a9114a38ad91e1a0eac5e1a5, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/030e5db5b59f4c2e993f86dc4d0ac386, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/193f44ac81e64597b9ade013a8ce9325, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/4a82a2e82fb64ff895b0b5788edbedc1, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/399a41decdc54956a5e354941ed352a1, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/71003160ca254b8e83d6d8d111294310, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/e4e0128eac284db1bf4dce0e30db8c9b] to archive 2024-12-12T16:28:52,191 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/4f6a4780a2f6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-12T16:28:52,193 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/571dc34ce88f4262b8c75d180ab4eb0d to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/571dc34ce88f4262b8c75d180ab4eb0d 2024-12-12T16:28:52,193 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/f3d834f71cc840ae813eaf8a8db42463 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/f3d834f71cc840ae813eaf8a8db42463 2024-12-12T16:28:52,193 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/ba80a12c0b8f49bdbe5f3e72912b37d3 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/ba80a12c0b8f49bdbe5f3e72912b37d3 2024-12-12T16:28:52,193 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/f7a3d20c94c6464ea2c065eb29dc3d12 to 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/f7a3d20c94c6464ea2c065eb29dc3d12 2024-12-12T16:28:52,194 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/a8af27310a8f44c38729f3c8101c73d7 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/a8af27310a8f44c38729f3c8101c73d7 2024-12-12T16:28:52,194 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/52382815b9b2473ea0d9f17dcf23b2a8 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/52382815b9b2473ea0d9f17dcf23b2a8 2024-12-12T16:28:52,194 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/5600e7a7640a45f8bb186b02f8511eea to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/5600e7a7640a45f8bb186b02f8511eea 2024-12-12T16:28:52,194 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/607a440527b643baa82714a4bca42611 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/607a440527b643baa82714a4bca42611 2024-12-12T16:28:52,195 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/4fb755e949244ba18793312d823cefa4 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/4fb755e949244ba18793312d823cefa4 2024-12-12T16:28:52,195 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/4a82a2e82fb64ff895b0b5788edbedc1 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/4a82a2e82fb64ff895b0b5788edbedc1 2024-12-12T16:28:52,195 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/db48d4d6a9114a38ad91e1a0eac5e1a5 to 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/db48d4d6a9114a38ad91e1a0eac5e1a5 2024-12-12T16:28:52,195 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/030e5db5b59f4c2e993f86dc4d0ac386 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/030e5db5b59f4c2e993f86dc4d0ac386 2024-12-12T16:28:52,195 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/399a41decdc54956a5e354941ed352a1 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/399a41decdc54956a5e354941ed352a1 2024-12-12T16:28:52,196 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/193f44ac81e64597b9ade013a8ce9325 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/193f44ac81e64597b9ade013a8ce9325 2024-12-12T16:28:52,196 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/71003160ca254b8e83d6d8d111294310 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/71003160ca254b8e83d6d8d111294310 2024-12-12T16:28:52,196 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/e4e0128eac284db1bf4dce0e30db8c9b to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/e4e0128eac284db1bf4dce0e30db8c9b 2024-12-12T16:28:52,198 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/4f6a4780a2f6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/cc060933e4894e4a9fd02eb9f1a3b124, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/ee56b8e7d56946dbbe6dd5d1237f8e97, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/fcf822a41aca41de8be14bb5671c4a55, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/c6b922adeb87435195cc3056128859e7, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/2d2b7db07e8f45eb9d2a9c52e39a1213, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/9657f6dab85443008b451cc18e486b87, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/ac9f02d86d784dbfaf8c009a0cded22d, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/270de29f5e9047508fc5c6b1d683b3d7, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/fc432ca2852b4154a65e620b655088df, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/8c08686946194b11a3861940dbbf284f, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/85964d63c52f4ee1824c56fdeea9d296, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/67d8f4cdf54748948812830182f66bf6, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/0c6eba69084f42119260a04018cb54d1, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/d8e1bc2ef2414f43a694305ebfcca806, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/1c03b5a8441a4160ae4f3d85362c1ba7, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/b55c4daa05a34a3183061b5c5b167c76] to archive 2024-12-12T16:28:52,198 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/4f6a4780a2f6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-12T16:28:52,200 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/fcf822a41aca41de8be14bb5671c4a55 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/fcf822a41aca41de8be14bb5671c4a55 2024-12-12T16:28:52,200 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/cc060933e4894e4a9fd02eb9f1a3b124 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/cc060933e4894e4a9fd02eb9f1a3b124 2024-12-12T16:28:52,201 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/270de29f5e9047508fc5c6b1d683b3d7 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/270de29f5e9047508fc5c6b1d683b3d7 2024-12-12T16:28:52,201 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/ee56b8e7d56946dbbe6dd5d1237f8e97 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/ee56b8e7d56946dbbe6dd5d1237f8e97 2024-12-12T16:28:52,201 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/2d2b7db07e8f45eb9d2a9c52e39a1213 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/2d2b7db07e8f45eb9d2a9c52e39a1213 2024-12-12T16:28:52,201 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/9657f6dab85443008b451cc18e486b87 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/9657f6dab85443008b451cc18e486b87 2024-12-12T16:28:52,201 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/ac9f02d86d784dbfaf8c009a0cded22d to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/ac9f02d86d784dbfaf8c009a0cded22d 2024-12-12T16:28:52,202 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/c6b922adeb87435195cc3056128859e7 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/c6b922adeb87435195cc3056128859e7 2024-12-12T16:28:52,202 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/fc432ca2852b4154a65e620b655088df to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/fc432ca2852b4154a65e620b655088df 2024-12-12T16:28:52,202 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/0c6eba69084f42119260a04018cb54d1 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/0c6eba69084f42119260a04018cb54d1 2024-12-12T16:28:52,202 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/67d8f4cdf54748948812830182f66bf6 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/67d8f4cdf54748948812830182f66bf6 2024-12-12T16:28:52,203 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/85964d63c52f4ee1824c56fdeea9d296 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/85964d63c52f4ee1824c56fdeea9d296 2024-12-12T16:28:52,203 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/d8e1bc2ef2414f43a694305ebfcca806 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/d8e1bc2ef2414f43a694305ebfcca806 2024-12-12T16:28:52,203 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/8c08686946194b11a3861940dbbf284f to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/8c08686946194b11a3861940dbbf284f 2024-12-12T16:28:52,203 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/1c03b5a8441a4160ae4f3d85362c1ba7 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/1c03b5a8441a4160ae4f3d85362c1ba7 2024-12-12T16:28:52,203 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/b55c4daa05a34a3183061b5c5b167c76 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/b55c4daa05a34a3183061b5c5b167c76 2024-12-12T16:28:52,205 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/4f6a4780a2f6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/fb36588fe9374dec920e6baf9983c044, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/75b0c29b87124b04944401002787db7b, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/3a2e4a472846441cb8a8a0fc90bc1a8e, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/bb7b85fd830a4622a81cb6089f70c8f9, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/5d5a62756124418b9fa56ced489c5ad1, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/2cddaf44036046fd8f7d9eeda8b87736, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/bf26a44e092b40e797bf8fdc9b1b03d3, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/e27d2ea20ead4cceabad91e33d6cee14, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/1b2c720731834391b0486f199e1fb468, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/33b2aadded0543ddac018f030435c932, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/565908d4e6aa4f3a8be622bc2982955d, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/0b2b6b99a3414d16b54f795242a36d74, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/caaf086dbe8a44a1935d0e1ab526539d, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/0c4c99efb5fa44fb92494cc58a7553c7, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/a3e8b6e225144619942020afd2744e1d, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/fb06b86af1e044709c94661438eb4f0d] to archive 2024-12-12T16:28:52,206 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/4f6a4780a2f6:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-12T16:28:52,207 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/fb36588fe9374dec920e6baf9983c044 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/fb36588fe9374dec920e6baf9983c044 2024-12-12T16:28:52,207 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/5d5a62756124418b9fa56ced489c5ad1 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/5d5a62756124418b9fa56ced489c5ad1 2024-12-12T16:28:52,207 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/bf26a44e092b40e797bf8fdc9b1b03d3 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/bf26a44e092b40e797bf8fdc9b1b03d3 2024-12-12T16:28:52,208 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/75b0c29b87124b04944401002787db7b to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/75b0c29b87124b04944401002787db7b 2024-12-12T16:28:52,208 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/e27d2ea20ead4cceabad91e33d6cee14 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/e27d2ea20ead4cceabad91e33d6cee14 2024-12-12T16:28:52,208 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/bb7b85fd830a4622a81cb6089f70c8f9 to 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/bb7b85fd830a4622a81cb6089f70c8f9 2024-12-12T16:28:52,208 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/2cddaf44036046fd8f7d9eeda8b87736 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/2cddaf44036046fd8f7d9eeda8b87736 2024-12-12T16:28:52,208 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/3a2e4a472846441cb8a8a0fc90bc1a8e to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/3a2e4a472846441cb8a8a0fc90bc1a8e 2024-12-12T16:28:52,209 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/1b2c720731834391b0486f199e1fb468 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/1b2c720731834391b0486f199e1fb468 2024-12-12T16:28:52,209 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/33b2aadded0543ddac018f030435c932 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/33b2aadded0543ddac018f030435c932 2024-12-12T16:28:52,209 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/565908d4e6aa4f3a8be622bc2982955d to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/565908d4e6aa4f3a8be622bc2982955d 2024-12-12T16:28:52,210 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/0b2b6b99a3414d16b54f795242a36d74 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/0b2b6b99a3414d16b54f795242a36d74 2024-12-12T16:28:52,210 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/caaf086dbe8a44a1935d0e1ab526539d to 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/caaf086dbe8a44a1935d0e1ab526539d 2024-12-12T16:28:52,210 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/0c4c99efb5fa44fb92494cc58a7553c7 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/0c4c99efb5fa44fb92494cc58a7553c7 2024-12-12T16:28:52,210 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/fb06b86af1e044709c94661438eb4f0d to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/fb06b86af1e044709c94661438eb4f0d 2024-12-12T16:28:52,210 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/a3e8b6e225144619942020afd2744e1d to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/a3e8b6e225144619942020afd2744e1d 2024-12-12T16:28:52,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-12T16:28:52,229 INFO [Thread-1616 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 120 completed 2024-12-12T16:28:52,262 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:52,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59094 deadline: 1734020992262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:52,263 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:52,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59122 deadline: 1734020992263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:52,264 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:52,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59138 deadline: 1734020992263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:52,265 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:28:52,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59166 deadline: 1734020992265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:28:52,372 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=314 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/cdd2ce54257e413ca6ac681c40e2e986 2024-12-12T16:28:52,378 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/c975adcb659640b499b1939b932095ef is 50, key is test_row_0/C:col10/1734020931134/Put/seqid=0 2024-12-12T16:28:52,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742260_1436 (size=12301) 2024-12-12T16:28:52,782 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=314 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/c975adcb659640b499b1939b932095ef 2024-12-12T16:28:52,785 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/f26d60cbfe154b9fb20d419aebf0471d as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/f26d60cbfe154b9fb20d419aebf0471d 2024-12-12T16:28:52,788 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/f26d60cbfe154b9fb20d419aebf0471d, entries=200, sequenceid=314, filesize=39.0 K 2024-12-12T16:28:52,789 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/cdd2ce54257e413ca6ac681c40e2e986 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/cdd2ce54257e413ca6ac681c40e2e986 2024-12-12T16:28:52,792 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/cdd2ce54257e413ca6ac681c40e2e986, entries=150, sequenceid=314, filesize=12.0 K 2024-12-12T16:28:52,792 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/c975adcb659640b499b1939b932095ef as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/c975adcb659640b499b1939b932095ef 2024-12-12T16:28:52,795 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/c975adcb659640b499b1939b932095ef, entries=150, sequenceid=314, filesize=12.0 K 2024-12-12T16:28:52,796 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=40.25 KB/41220 for 53789b78ac3c456afdd9ca3b09fff4b9 in 1661ms, sequenceid=314, compaction requested=true 2024-12-12T16:28:52,796 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 53789b78ac3c456afdd9ca3b09fff4b9: 2024-12-12T16:28:52,796 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53789b78ac3c456afdd9ca3b09fff4b9:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T16:28:52,796 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:52,796 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T16:28:52,796 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53789b78ac3c456afdd9ca3b09fff4b9:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T16:28:52,796 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:52,796 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T16:28:52,796 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 53789b78ac3c456afdd9ca3b09fff4b9:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T16:28:52,796 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:28:52,797 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49600 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T16:28:52,797 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] 
compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 151494 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T16:28:52,797 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): 53789b78ac3c456afdd9ca3b09fff4b9/A is initiating minor compaction (all files) 2024-12-12T16:28:52,797 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): 53789b78ac3c456afdd9ca3b09fff4b9/B is initiating minor compaction (all files) 2024-12-12T16:28:52,797 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53789b78ac3c456afdd9ca3b09fff4b9/A in TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:52,797 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53789b78ac3c456afdd9ca3b09fff4b9/B in TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:52,797 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/d2e7d9cb27c14678bf8f14bdcaf318ad, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/7782498ebc454fb08544da39d5ba9ca6, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/dbc4245a4c0c4f45b76921093032ad1a, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/f26d60cbfe154b9fb20d419aebf0471d] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp, totalSize=147.9 K 2024-12-12T16:28:52,797 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/b9b2c92a645f4ca69447cbf4e4dd3bd9, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/8a5bb42854d24538a46b3d1f45ce72b2, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/bb33c33095af4c5f9af84e21d9ba3993, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/cdd2ce54257e413ca6ac681c40e2e986] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp, totalSize=48.4 K 2024-12-12T16:28:52,797 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 
2024-12-12T16:28:52,797 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. files: [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/d2e7d9cb27c14678bf8f14bdcaf318ad, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/7782498ebc454fb08544da39d5ba9ca6, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/dbc4245a4c0c4f45b76921093032ad1a, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/f26d60cbfe154b9fb20d419aebf0471d] 2024-12-12T16:28:52,797 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting b9b2c92a645f4ca69447cbf4e4dd3bd9, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1734020924997 2024-12-12T16:28:52,797 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting d2e7d9cb27c14678bf8f14bdcaf318ad, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1734020924997 2024-12-12T16:28:52,798 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 8a5bb42854d24538a46b3d1f45ce72b2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1734020927173 2024-12-12T16:28:52,798 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7782498ebc454fb08544da39d5ba9ca6, keycount=250, bloomtype=ROW, size=47.5 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1734020927163 2024-12-12T16:28:52,798 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting bb33c33095af4c5f9af84e21d9ba3993, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1734020927799 2024-12-12T16:28:52,798 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting dbc4245a4c0c4f45b76921093032ad1a, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1734020927799 2024-12-12T16:28:52,798 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting cdd2ce54257e413ca6ac681c40e2e986, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1734020930000 2024-12-12T16:28:52,798 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting f26d60cbfe154b9fb20d419aebf0471d, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1734020930000 2024-12-12T16:28:52,807 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 53789b78ac3c456afdd9ca3b09fff4b9#B#compaction#366 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:28:52,807 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=53789b78ac3c456afdd9ca3b09fff4b9] 2024-12-12T16:28:52,808 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/ab0933e9241347b5aff1645995c46a20 is 50, key is test_row_0/B:col10/1734020931134/Put/seqid=0 2024-12-12T16:28:52,810 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412124fc264f6207849a48cf1d33d0db99533_53789b78ac3c456afdd9ca3b09fff4b9 store=[table=TestAcidGuarantees family=A region=53789b78ac3c456afdd9ca3b09fff4b9] 2024-12-12T16:28:52,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742261_1437 (size=12439) 2024-12-12T16:28:52,837 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412124fc264f6207849a48cf1d33d0db99533_53789b78ac3c456afdd9ca3b09fff4b9, store=[table=TestAcidGuarantees family=A region=53789b78ac3c456afdd9ca3b09fff4b9] 2024-12-12T16:28:52,837 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412124fc264f6207849a48cf1d33d0db99533_53789b78ac3c456afdd9ca3b09fff4b9 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=53789b78ac3c456afdd9ca3b09fff4b9] 2024-12-12T16:28:52,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742262_1438 (size=4469) 2024-12-12T16:28:53,216 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/ab0933e9241347b5aff1645995c46a20 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/ab0933e9241347b5aff1645995c46a20 2024-12-12T16:28:53,220 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 53789b78ac3c456afdd9ca3b09fff4b9/B of 53789b78ac3c456afdd9ca3b09fff4b9 into ab0933e9241347b5aff1645995c46a20(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T16:28:53,220 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53789b78ac3c456afdd9ca3b09fff4b9: 2024-12-12T16:28:53,220 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9., storeName=53789b78ac3c456afdd9ca3b09fff4b9/B, priority=12, startTime=1734020932796; duration=0sec 2024-12-12T16:28:53,220 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:28:53,220 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53789b78ac3c456afdd9ca3b09fff4b9:B 2024-12-12T16:28:53,221 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T16:28:53,221 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49600 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T16:28:53,221 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): 53789b78ac3c456afdd9ca3b09fff4b9/C is initiating minor compaction (all files) 2024-12-12T16:28:53,221 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 53789b78ac3c456afdd9ca3b09fff4b9/C in TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:28:53,222 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/de8389de099246c482e4f612b3f2cefd, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/b1fd7403e10146729f9981d7dde0b9bf, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/470f4864bdd047aaa53d66ce2a1d39e3, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/c975adcb659640b499b1939b932095ef] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp, totalSize=48.4 K 2024-12-12T16:28:53,222 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting de8389de099246c482e4f612b3f2cefd, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1734020924997 2024-12-12T16:28:53,222 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting b1fd7403e10146729f9981d7dde0b9bf, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1734020927173 2024-12-12T16:28:53,222 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 470f4864bdd047aaa53d66ce2a1d39e3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=286, earliestPutTs=1734020927799 2024-12-12T16:28:53,222 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting c975adcb659640b499b1939b932095ef, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1734020930000 2024-12-12T16:28:53,229 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 53789b78ac3c456afdd9ca3b09fff4b9#C#compaction#368 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:28:53,229 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/18da7e5da43241109f5a0bc133507d35 is 50, key is test_row_0/C:col10/1734020931134/Put/seqid=0 2024-12-12T16:28:53,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742263_1439 (size=12439) 2024-12-12T16:28:53,242 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 53789b78ac3c456afdd9ca3b09fff4b9#A#compaction#367 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:28:53,243 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/556fcdb4a1df4c2d92b8d81a111bd38b is 175, key is test_row_0/A:col10/1734020931134/Put/seqid=0 2024-12-12T16:28:53,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742264_1440 (size=31393) 2024-12-12T16:28:53,267 DEBUG [Thread-1612 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1f94d721 to 127.0.0.1:52684 2024-12-12T16:28:53,268 DEBUG [Thread-1612 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:28:53,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:28:53,270 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 53789b78ac3c456afdd9ca3b09fff4b9 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-12T16:28:53,270 DEBUG [Thread-1608 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3dd5b441 to 127.0.0.1:52684 2024-12-12T16:28:53,270 DEBUG [Thread-1608 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:28:53,270 DEBUG [Thread-1610 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3c336ea4 to 127.0.0.1:52684 2024-12-12T16:28:53,270 DEBUG [Thread-1610 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:28:53,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53789b78ac3c456afdd9ca3b09fff4b9, store=A 2024-12-12T16:28:53,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:53,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
53789b78ac3c456afdd9ca3b09fff4b9, store=B 2024-12-12T16:28:53,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:53,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53789b78ac3c456afdd9ca3b09fff4b9, store=C 2024-12-12T16:28:53,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:28:53,276 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412128d1bbb3b22f445b9aaf2b612e40b41b5_53789b78ac3c456afdd9ca3b09fff4b9 is 50, key is test_row_0/A:col10/1734020933269/Put/seqid=0 2024-12-12T16:28:53,276 DEBUG [Thread-1614 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x319559be to 127.0.0.1:52684 2024-12-12T16:28:53,276 DEBUG [Thread-1614 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:28:53,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742265_1441 (size=12454) 2024-12-12T16:28:53,640 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/18da7e5da43241109f5a0bc133507d35 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/18da7e5da43241109f5a0bc133507d35 2024-12-12T16:28:53,644 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 53789b78ac3c456afdd9ca3b09fff4b9/C of 53789b78ac3c456afdd9ca3b09fff4b9 into 18da7e5da43241109f5a0bc133507d35(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
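The entries above show the region server's ExploringCompactionPolicy picking four eligible store files and the long-compactions thread finishing the resulting minor compactions of stores B and C of region 53789b78ac3c456afdd9ca3b09fff4b9. In this test the compactions are triggered internally after flushes, but for reference a client can also request one through the standard Admin API; a minimal sketch, where the connection setup and the choice of family are assumptions and only the table name is taken from this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RequestCompaction {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();            // reads hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          // Queue a minor compaction of family C on every region of the table.
          admin.compact(table, Bytes.toBytes("C"));
          // Or queue a major compaction, which rewrites all store files of the family.
          admin.majorCompact(table, Bytes.toBytes("C"));
        }
      }
    }

Both calls only enqueue a request; which files actually get merged is still decided by the compaction policy whose selection is logged above.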
2024-12-12T16:28:53,644 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53789b78ac3c456afdd9ca3b09fff4b9: 2024-12-12T16:28:53,644 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9., storeName=53789b78ac3c456afdd9ca3b09fff4b9/C, priority=12, startTime=1734020932796; duration=0sec 2024-12-12T16:28:53,644 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:53,644 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53789b78ac3c456afdd9ca3b09fff4b9:C 2024-12-12T16:28:53,649 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/556fcdb4a1df4c2d92b8d81a111bd38b as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/556fcdb4a1df4c2d92b8d81a111bd38b 2024-12-12T16:28:53,652 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 53789b78ac3c456afdd9ca3b09fff4b9/A of 53789b78ac3c456afdd9ca3b09fff4b9 into 556fcdb4a1df4c2d92b8d81a111bd38b(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:28:53,652 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 53789b78ac3c456afdd9ca3b09fff4b9: 2024-12-12T16:28:53,652 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9., storeName=53789b78ac3c456afdd9ca3b09fff4b9/A, priority=12, startTime=1734020932796; duration=0sec 2024-12-12T16:28:53,652 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:28:53,652 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 53789b78ac3c456afdd9ca3b09fff4b9:A 2024-12-12T16:28:53,680 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:28:53,683 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412128d1bbb3b22f445b9aaf2b612e40b41b5_53789b78ac3c456afdd9ca3b09fff4b9 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412128d1bbb3b22f445b9aaf2b612e40b41b5_53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:28:53,684 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/905e9436cc9b4a9194ce71e2d8b8cb81, store: [table=TestAcidGuarantees family=A region=53789b78ac3c456afdd9ca3b09fff4b9] 2024-12-12T16:28:53,684 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/905e9436cc9b4a9194ce71e2d8b8cb81 is 175, key is test_row_0/A:col10/1734020933269/Put/seqid=0 2024-12-12T16:28:53,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742266_1442 (size=31255) 2024-12-12T16:28:54,088 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=327, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/905e9436cc9b4a9194ce71e2d8b8cb81 2024-12-12T16:28:54,094 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/c3f2f1dc2dfc43659402c959034b7e99 is 50, key is test_row_0/B:col10/1734020933269/Put/seqid=0 2024-12-12T16:28:54,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742267_1443 (size=12301) 2024-12-12T16:28:54,498 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=327 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/c3f2f1dc2dfc43659402c959034b7e99 2024-12-12T16:28:54,503 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/881734e666d948338b3526d142e20a56 is 50, key is test_row_0/C:col10/1734020933269/Put/seqid=0 2024-12-12T16:28:54,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742268_1444 (size=12301) 2024-12-12T16:28:54,907 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=327 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/881734e666d948338b3526d142e20a56 2024-12-12T16:28:54,910 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/905e9436cc9b4a9194ce71e2d8b8cb81 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/905e9436cc9b4a9194ce71e2d8b8cb81 2024-12-12T16:28:54,913 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/905e9436cc9b4a9194ce71e2d8b8cb81, entries=150, sequenceid=327, filesize=30.5 K 2024-12-12T16:28:54,914 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/c3f2f1dc2dfc43659402c959034b7e99 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/c3f2f1dc2dfc43659402c959034b7e99 2024-12-12T16:28:54,916 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/c3f2f1dc2dfc43659402c959034b7e99, entries=150, sequenceid=327, filesize=12.0 K 2024-12-12T16:28:54,917 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/881734e666d948338b3526d142e20a56 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/881734e666d948338b3526d142e20a56 2024-12-12T16:28:54,920 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/881734e666d948338b3526d142e20a56, entries=150, sequenceid=327, filesize=12.0 K 2024-12-12T16:28:54,920 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=6.71 KB/6870 for 53789b78ac3c456afdd9ca3b09fff4b9 in 1651ms, sequenceid=327, compaction requested=false 2024-12-12T16:28:54,920 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 53789b78ac3c456afdd9ca3b09fff4b9: 2024-12-12T16:29:00,299 DEBUG [Thread-1606 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7ec15031 to 127.0.0.1:52684 2024-12-12T16:29:00,299 DEBUG [Thread-1606 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:29:00,299 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers:
2024-12-12T16:29:00,299 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 29
2024-12-12T16:29:00,299 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 58
2024-12-12T16:29:00,299 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 52
2024-12-12T16:29:00,299 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 59
2024-12-12T16:29:00,299 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 63
2024-12-12T16:29:00,299 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers:
2024-12-12T16:29:00,299 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners:
2024-12-12T16:29:00,299 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2444
2024-12-12T16:29:00,299 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7331 rows
2024-12-12T16:29:00,299 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2469
2024-12-12T16:29:00,299 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7407 rows
2024-12-12T16:29:00,299 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2462
2024-12-12T16:29:00,299 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7384 rows
2024-12-12T16:29:00,299 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2467
2024-12-12T16:29:00,299 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7400 rows
2024-12-12T16:29:00,299 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2437
2024-12-12T16:29:00,299 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7311 rows
2024-12-12T16:29:00,299 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-12-12T16:29:00,299 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7b6cf8cb to 127.0.0.1:52684
2024-12-12T16:29:00,299 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-12T16:29:00,302 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees
2024-12-12T16:29:00,302 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees
2024-12-12T16:29:00,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=122, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees
2024-12-12T16:29:00,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122
2024-12-12T16:29:00,306 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734020940306"}]},"ts":"1734020940306"}
2024-12-12T16:29:00,307 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta
2024-12-12T16:29:00,309 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING
2024-12-12T16:29:00,310 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}]
2024-12-12T16:29:00,311 INFO [PEWorker-4 {}]
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=124, ppid=123, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=53789b78ac3c456afdd9ca3b09fff4b9, UNASSIGN}] 2024-12-12T16:29:00,312 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=124, ppid=123, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=53789b78ac3c456afdd9ca3b09fff4b9, UNASSIGN 2024-12-12T16:29:00,312 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=124 updating hbase:meta row=53789b78ac3c456afdd9ca3b09fff4b9, regionState=CLOSING, regionLocation=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:00,313 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T16:29:00,313 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE; CloseRegionProcedure 53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476}] 2024-12-12T16:29:00,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-12T16:29:00,464 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:00,465 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] handler.UnassignRegionHandler(124): Close 53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:29:00,465 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T16:29:00,465 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1681): Closing 53789b78ac3c456afdd9ca3b09fff4b9, disabling compactions & flushes 2024-12-12T16:29:00,465 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:29:00,465 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 2024-12-12T16:29:00,465 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. after waiting 0 ms 2024-12-12T16:29:00,465 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 
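At this point the master is driving DisableTableProcedure pid=122 through its CloseTableRegions and TransitRegionState subprocedures while the region server closes 53789b78ac3c456afdd9ca3b09fff4b9, and the repeated "Checking to see if procedure is done pid=122" lines are the client polling for completion. A hedged sketch of that client side using the asynchronous Admin call; the Admin handle and the timeout value are assumptions, not taken from this log:

    import java.util.concurrent.Future;
    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    public class DisableAndWait {
      // Caller supplies an open Admin, e.g. ConnectionFactory.createConnection(conf).getAdmin().
      static void disableAndWait(Admin admin) throws Exception {
        TableName table = TableName.valueOf("TestAcidGuarantees");
        // Submits the DisableTableProcedure on the master and returns immediately.
        Future<Void> done = admin.disableTableAsync(table);
        // Waits for the procedure (and its region-close subprocedures) to finish;
        // the client-side wait is what produces the repeated "is procedure done" RPCs in this log.
        done.get(5, TimeUnit.MINUTES);
      }
    }

The synchronous admin.disableTable(table) wraps the same submit-and-wait cycle.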
2024-12-12T16:29:00,465 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(2837): Flushing 53789b78ac3c456afdd9ca3b09fff4b9 3/3 column families, dataSize=13.42 KB heapSize=35.91 KB 2024-12-12T16:29:00,465 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53789b78ac3c456afdd9ca3b09fff4b9, store=A 2024-12-12T16:29:00,465 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:00,465 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53789b78ac3c456afdd9ca3b09fff4b9, store=B 2024-12-12T16:29:00,465 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:00,465 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 53789b78ac3c456afdd9ca3b09fff4b9, store=C 2024-12-12T16:29:00,465 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:00,470 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412123f4ebb6f7f744113906301f575589914_53789b78ac3c456afdd9ca3b09fff4b9 is 50, key is test_row_1/A:col10/1734020933275/Put/seqid=0 2024-12-12T16:29:00,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742269_1445 (size=9914) 2024-12-12T16:29:00,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-12T16:29:00,874 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:29:00,877 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412123f4ebb6f7f744113906301f575589914_53789b78ac3c456afdd9ca3b09fff4b9 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412123f4ebb6f7f744113906301f575589914_53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:29:00,878 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/db850f0fa04048fdb22c17ff7c014105, store: [table=TestAcidGuarantees family=A region=53789b78ac3c456afdd9ca3b09fff4b9] 2024-12-12T16:29:00,878 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/db850f0fa04048fdb22c17ff7c014105 is 175, key is test_row_1/A:col10/1734020933275/Put/seqid=0 2024-12-12T16:29:00,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742270_1446 (size=22561) 2024-12-12T16:29:00,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-12T16:29:01,282 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=334, memsize=4.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/db850f0fa04048fdb22c17ff7c014105 2024-12-12T16:29:01,287 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/aa1e1dc50bf946cb95163b1a1fdbd219 is 50, key is test_row_1/B:col10/1734020933275/Put/seqid=0 2024-12-12T16:29:01,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742271_1447 (size=9857) 2024-12-12T16:29:01,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-12T16:29:01,691 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.47 KB at sequenceid=334 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/aa1e1dc50bf946cb95163b1a1fdbd219 2024-12-12T16:29:01,696 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/28cd5cc3fb1b45f3b8a0d67ee7d1e26f is 50, key is test_row_1/C:col10/1734020933275/Put/seqid=0 2024-12-12T16:29:01,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742272_1448 (size=9857) 2024-12-12T16:29:02,100 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.47 KB at sequenceid=334 (bloomFilter=true), 
to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/28cd5cc3fb1b45f3b8a0d67ee7d1e26f 2024-12-12T16:29:02,104 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/A/db850f0fa04048fdb22c17ff7c014105 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/db850f0fa04048fdb22c17ff7c014105 2024-12-12T16:29:02,107 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/db850f0fa04048fdb22c17ff7c014105, entries=100, sequenceid=334, filesize=22.0 K 2024-12-12T16:29:02,107 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/B/aa1e1dc50bf946cb95163b1a1fdbd219 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/aa1e1dc50bf946cb95163b1a1fdbd219 2024-12-12T16:29:02,110 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/aa1e1dc50bf946cb95163b1a1fdbd219, entries=100, sequenceid=334, filesize=9.6 K 2024-12-12T16:29:02,110 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/.tmp/C/28cd5cc3fb1b45f3b8a0d67ee7d1e26f as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/28cd5cc3fb1b45f3b8a0d67ee7d1e26f 2024-12-12T16:29:02,113 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/28cd5cc3fb1b45f3b8a0d67ee7d1e26f, entries=100, sequenceid=334, filesize=9.6 K 2024-12-12T16:29:02,114 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(3040): Finished flush of dataSize ~13.42 KB/13740, heapSize ~35.86 KB/36720, currentSize=0 B/0 for 53789b78ac3c456afdd9ca3b09fff4b9 in 1649ms, sequenceid=334, compaction requested=true 2024-12-12T16:29:02,114 DEBUG [StoreCloser-TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/d2e7d9cb27c14678bf8f14bdcaf318ad, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/7782498ebc454fb08544da39d5ba9ca6, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/dbc4245a4c0c4f45b76921093032ad1a, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/f26d60cbfe154b9fb20d419aebf0471d] to archive 2024-12-12T16:29:02,115 DEBUG [StoreCloser-TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-12T16:29:02,116 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/dbc4245a4c0c4f45b76921093032ad1a to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/dbc4245a4c0c4f45b76921093032ad1a 2024-12-12T16:29:02,116 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/d2e7d9cb27c14678bf8f14bdcaf318ad to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/d2e7d9cb27c14678bf8f14bdcaf318ad 2024-12-12T16:29:02,116 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/f26d60cbfe154b9fb20d419aebf0471d to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/f26d60cbfe154b9fb20d419aebf0471d 2024-12-12T16:29:02,116 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/7782498ebc454fb08544da39d5ba9ca6 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/7782498ebc454fb08544da39d5ba9ca6 2024-12-12T16:29:02,117 DEBUG [StoreCloser-TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/b9b2c92a645f4ca69447cbf4e4dd3bd9, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/8a5bb42854d24538a46b3d1f45ce72b2, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/bb33c33095af4c5f9af84e21d9ba3993, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/cdd2ce54257e413ca6ac681c40e2e986] to archive 2024-12-12T16:29:02,118 DEBUG [StoreCloser-TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-12T16:29:02,119 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/8a5bb42854d24538a46b3d1f45ce72b2 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/8a5bb42854d24538a46b3d1f45ce72b2 2024-12-12T16:29:02,119 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/b9b2c92a645f4ca69447cbf4e4dd3bd9 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/b9b2c92a645f4ca69447cbf4e4dd3bd9 2024-12-12T16:29:02,119 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/bb33c33095af4c5f9af84e21d9ba3993 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/bb33c33095af4c5f9af84e21d9ba3993 2024-12-12T16:29:02,120 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/cdd2ce54257e413ca6ac681c40e2e986 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/cdd2ce54257e413ca6ac681c40e2e986 2024-12-12T16:29:02,120 DEBUG [StoreCloser-TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/de8389de099246c482e4f612b3f2cefd, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/b1fd7403e10146729f9981d7dde0b9bf, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/470f4864bdd047aaa53d66ce2a1d39e3, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/c975adcb659640b499b1939b932095ef] to archive 2024-12-12T16:29:02,121 DEBUG [StoreCloser-TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.-1 {}] backup.HFileArchiver(363): 
Archiving compacted files. 2024-12-12T16:29:02,122 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/de8389de099246c482e4f612b3f2cefd to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/de8389de099246c482e4f612b3f2cefd 2024-12-12T16:29:02,122 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/c975adcb659640b499b1939b932095ef to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/c975adcb659640b499b1939b932095ef 2024-12-12T16:29:02,122 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/470f4864bdd047aaa53d66ce2a1d39e3 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/470f4864bdd047aaa53d66ce2a1d39e3 2024-12-12T16:29:02,123 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/b1fd7403e10146729f9981d7dde0b9bf to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/b1fd7403e10146729f9981d7dde0b9bf 2024-12-12T16:29:02,126 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/recovered.edits/337.seqid, newMaxSeqId=337, maxSeqId=4 2024-12-12T16:29:02,126 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9. 
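The compacted-away store files are not deleted outright: the HFileArchiver entries above show them being moved under the cluster's archive directory (the remaining region files get the same treatment when the table is deleted below). A small sketch of listing what was archived for one family with the plain Hadoop FileSystem API; the path mirrors this test's layout and would differ on a real cluster:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListArchivedStoreFiles {
      public static void main(String[] args) throws Exception {
        // Archive layout is archive/data/<namespace>/<table>/<region>/<family>;
        // the prefix below is the test-data root used throughout this log.
        Path family = new Path("hdfs://localhost:45065/user/jenkins/test-data/"
            + "033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/"
            + "53789b78ac3c456afdd9ca3b09fff4b9/A");
        FileSystem fs = family.getFileSystem(new Configuration());
        for (FileStatus status : fs.listStatus(family)) {
          System.out.println(status.getPath().getName() + "\t" + status.getLen() + " bytes");
        }
      }
    }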
2024-12-12T16:29:02,126 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1635): Region close journal for 53789b78ac3c456afdd9ca3b09fff4b9: 2024-12-12T16:29:02,128 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] handler.UnassignRegionHandler(170): Closed 53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:29:02,128 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=124 updating hbase:meta row=53789b78ac3c456afdd9ca3b09fff4b9, regionState=CLOSED 2024-12-12T16:29:02,130 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=125, resume processing ppid=124 2024-12-12T16:29:02,130 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, ppid=124, state=SUCCESS; CloseRegionProcedure 53789b78ac3c456afdd9ca3b09fff4b9, server=4f6a4780a2f6,41933,1734020809476 in 1.8160 sec 2024-12-12T16:29:02,131 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=124, resume processing ppid=123 2024-12-12T16:29:02,131 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, ppid=123, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=53789b78ac3c456afdd9ca3b09fff4b9, UNASSIGN in 1.8190 sec 2024-12-12T16:29:02,133 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=123, resume processing ppid=122 2024-12-12T16:29:02,133 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, ppid=122, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.8220 sec 2024-12-12T16:29:02,134 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734020942133"}]},"ts":"1734020942133"} 2024-12-12T16:29:02,134 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-12T16:29:02,136 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-12T16:29:02,137 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.8350 sec 2024-12-12T16:29:02,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-12T16:29:02,409 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 122 completed 2024-12-12T16:29:02,410 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-12T16:29:02,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=126, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T16:29:02,411 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=126, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T16:29:02,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-12T16:29:02,411 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from 
filesystem for pid=126, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T16:29:02,413 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:29:02,415 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A, FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B, FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C, FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/recovered.edits] 2024-12-12T16:29:02,418 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/556fcdb4a1df4c2d92b8d81a111bd38b to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/556fcdb4a1df4c2d92b8d81a111bd38b 2024-12-12T16:29:02,418 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/905e9436cc9b4a9194ce71e2d8b8cb81 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/905e9436cc9b4a9194ce71e2d8b8cb81 2024-12-12T16:29:02,418 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/db850f0fa04048fdb22c17ff7c014105 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/A/db850f0fa04048fdb22c17ff7c014105 2024-12-12T16:29:02,420 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/aa1e1dc50bf946cb95163b1a1fdbd219 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/aa1e1dc50bf946cb95163b1a1fdbd219 2024-12-12T16:29:02,420 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/ab0933e9241347b5aff1645995c46a20 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/ab0933e9241347b5aff1645995c46a20 
2024-12-12T16:29:02,420 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/c3f2f1dc2dfc43659402c959034b7e99 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/B/c3f2f1dc2dfc43659402c959034b7e99 2024-12-12T16:29:02,423 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/28cd5cc3fb1b45f3b8a0d67ee7d1e26f to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/28cd5cc3fb1b45f3b8a0d67ee7d1e26f 2024-12-12T16:29:02,423 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/18da7e5da43241109f5a0bc133507d35 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/18da7e5da43241109f5a0bc133507d35 2024-12-12T16:29:02,423 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/881734e666d948338b3526d142e20a56 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/C/881734e666d948338b3526d142e20a56 2024-12-12T16:29:02,425 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/recovered.edits/337.seqid to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9/recovered.edits/337.seqid 2024-12-12T16:29:02,426 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:29:02,426 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-12T16:29:02,426 DEBUG [PEWorker-5 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-12T16:29:02,427 DEBUG [PEWorker-5 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-12T16:29:02,432 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212275b5407e97c4f8780d050d1606d81e4_53789b78ac3c456afdd9ca3b09fff4b9 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212275b5407e97c4f8780d050d1606d81e4_53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:29:02,432 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212189b4239423e406d966a6166e9b0eded_53789b78ac3c456afdd9ca3b09fff4b9 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212189b4239423e406d966a6166e9b0eded_53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:29:02,432 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412123adb7f35931b4166921e091bc587134e_53789b78ac3c456afdd9ca3b09fff4b9 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412123adb7f35931b4166921e091bc587134e_53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:29:02,432 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412123e22b04d19ae4f33918c6532282d18f1_53789b78ac3c456afdd9ca3b09fff4b9 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412123e22b04d19ae4f33918c6532282d18f1_53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:29:02,432 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412123f4697b44707468e8a6cde529c3f2411_53789b78ac3c456afdd9ca3b09fff4b9 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412123f4697b44707468e8a6cde529c3f2411_53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:29:02,432 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412123f4ebb6f7f744113906301f575589914_53789b78ac3c456afdd9ca3b09fff4b9 to 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412123f4ebb6f7f744113906301f575589914_53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:29:02,432 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412126eecd7d0aeae41e0a257a3c7bbb7f178_53789b78ac3c456afdd9ca3b09fff4b9 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412126eecd7d0aeae41e0a257a3c7bbb7f178_53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:29:02,432 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412127c3c7ab961f94fa29c8f48fc3266a839_53789b78ac3c456afdd9ca3b09fff4b9 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412127c3c7ab961f94fa29c8f48fc3266a839_53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:29:02,433 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212820dc023975a4f44b8d64025fb02caf5_53789b78ac3c456afdd9ca3b09fff4b9 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212820dc023975a4f44b8d64025fb02caf5_53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:29:02,433 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412128d1bbb3b22f445b9aaf2b612e40b41b5_53789b78ac3c456afdd9ca3b09fff4b9 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412128d1bbb3b22f445b9aaf2b612e40b41b5_53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:29:02,434 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212c41be29d9981440895600b6ce03d2d89_53789b78ac3c456afdd9ca3b09fff4b9 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212c41be29d9981440895600b6ce03d2d89_53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:29:02,434 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212a0d54180c25b4bf281e45eccad9217d8_53789b78ac3c456afdd9ca3b09fff4b9 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212a0d54180c25b4bf281e45eccad9217d8_53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:29:02,434 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212ac2bd4619d444637a5c3542f9a3c0044_53789b78ac3c456afdd9ca3b09fff4b9 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212ac2bd4619d444637a5c3542f9a3c0044_53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:29:02,434 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212c7145ba963ed4914a80f7c27c5b8700b_53789b78ac3c456afdd9ca3b09fff4b9 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212c7145ba963ed4914a80f7c27c5b8700b_53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:29:02,434 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212c423d5afe99e459cbce14156594c5436_53789b78ac3c456afdd9ca3b09fff4b9 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212c423d5afe99e459cbce14156594c5436_53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:29:02,434 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212cf808ccf5d5140e6ad272145fbbf2e51_53789b78ac3c456afdd9ca3b09fff4b9 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212cf808ccf5d5140e6ad272145fbbf2e51_53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:29:02,434 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212d0d4057f19974e02a113af907facc93d_53789b78ac3c456afdd9ca3b09fff4b9 to 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212d0d4057f19974e02a113af907facc93d_53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:29:02,435 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212ed7e2e53988544e7bed043fd7613b5de_53789b78ac3c456afdd9ca3b09fff4b9 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212ed7e2e53988544e7bed043fd7613b5de_53789b78ac3c456afdd9ca3b09fff4b9 2024-12-12T16:29:02,435 DEBUG [PEWorker-5 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-12T16:29:02,437 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=126, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T16:29:02,438 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-12T16:29:02,440 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-12T16:29:02,440 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=126, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T16:29:02,440 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 2024-12-12T16:29:02,441 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734020942440"}]},"ts":"9223372036854775807"} 2024-12-12T16:29:02,442 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-12T16:29:02,442 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 53789b78ac3c456afdd9ca3b09fff4b9, NAME => 'TestAcidGuarantees,,1734020908204.53789b78ac3c456afdd9ca3b09fff4b9.', STARTKEY => '', ENDKEY => ''}] 2024-12-12T16:29:02,442 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 
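[editor's note] The lines above show the server side of a table drop: HFileArchiver moves the MOB and store files into the archive directory, then DeleteTableProcedure removes the regions and descriptor from hbase:meta. For reference, a minimal client-side sketch of the Admin call that drives this sequence; it assumes an hbase-site.xml reachable on the classpath and uses only the table name taken from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // expects cluster config on the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      if (admin.tableExists(table)) {
        if (admin.isTableEnabled(table)) {
          admin.disableTable(table); // regions must be offline before deletion
        }
        // DeleteTableProcedure then archives store files, deletes the regions from
        // hbase:meta and drops the descriptor -- the sequence logged above.
        admin.deleteTable(table);
      }
    }
  }
}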
2024-12-12T16:29:02,442 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734020942442"}]},"ts":"9223372036854775807"} 2024-12-12T16:29:02,443 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-12T16:29:02,445 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=126, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T16:29:02,446 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 35 msec 2024-12-12T16:29:02,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-12T16:29:02,512 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 126 completed 2024-12-12T16:29:02,522 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=245 (was 244) - Thread LEAK? -, OpenFileDescriptor=449 (was 450), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=341 (was 359), ProcessCount=11 (was 11), AvailableMemoryMB=7677 (was 7730) 2024-12-12T16:29:02,531 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=245, OpenFileDescriptor=449, MaxFileDescriptor=1048576, SystemLoadAverage=341, ProcessCount=11, AvailableMemoryMB=7677 2024-12-12T16:29:02,532 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
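[editor's note] The TableDescriptorChecker warning above fires because this test table sets MEMSTORE_FLUSHSIZE to 131072 bytes (128 KB), far below the stock 128 MB default for hbase.hregion.memstore.flush.size. A hedged sketch of how a table descriptor would normally declare the flush size; the 128 MB value is only illustrative of the default, not taken from this test.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class FlushSizeExample {
  static TableDescriptor withDefaultFlushSize() {
    // 131072 (128 KB), as in the warning above, would flush almost constantly;
    // 128 MB is the shipped default for hbase.hregion.memstore.flush.size.
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setMemStoreFlushSize(128L * 1024 * 1024) // per-region flush threshold, in bytes
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("A"))
        .build();
  }
}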
2024-12-12T16:29:02,532 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T16:29:02,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=127, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-12T16:29:02,533 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T16:29:02,534 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:29:02,534 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 127 2024-12-12T16:29:02,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=127 2024-12-12T16:29:02,534 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T16:29:02,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742273_1449 (size=963) 2024-12-12T16:29:02,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=127 2024-12-12T16:29:02,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=127 2024-12-12T16:29:02,941 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc 2024-12-12T16:29:02,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742274_1450 (size=53) 2024-12-12T16:29:03,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=127 2024-12-12T16:29:03,346 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T16:29:03,346 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 7f9a08e11e132c4f4473bdc5fef699f6, disabling compactions & flushes 2024-12-12T16:29:03,346 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:03,346 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:03,346 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. after waiting 0 ms 2024-12-12T16:29:03,346 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:03,346 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 
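[editor's note] The create request logged above carries the table attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' plus three families A/B/C with VERSIONS => '1' and ROW bloom filters. A minimal sketch of building the equivalent descriptor from the Java client, assuming a reachable cluster; only settings visible in the log are reproduced.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateAdaptiveTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestAcidGuarantees"))
          // the METADATA attribute shown in the create log; switches stores to CompactingMemStore
          .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
      for (String family : new String[] {"A", "B", "C"}) {
        table.setColumnFamily(ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes(family))
            .setMaxVersions(1)                  // VERSIONS => '1'
            .setBloomFilterType(BloomType.ROW)  // BLOOMFILTER => 'ROW'
            .build());
      }
      admin.createTable(table.build()); // drives CreateTableProcedure (pid=127 above)
    }
  }
}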
2024-12-12T16:29:03,346 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 7f9a08e11e132c4f4473bdc5fef699f6: 2024-12-12T16:29:03,347 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T16:29:03,348 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1734020943347"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734020943347"}]},"ts":"1734020943347"} 2024-12-12T16:29:03,348 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-12T16:29:03,349 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T16:29:03,349 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734020943349"}]},"ts":"1734020943349"} 2024-12-12T16:29:03,350 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-12T16:29:03,354 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=128, ppid=127, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7f9a08e11e132c4f4473bdc5fef699f6, ASSIGN}] 2024-12-12T16:29:03,355 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=128, ppid=127, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7f9a08e11e132c4f4473bdc5fef699f6, ASSIGN 2024-12-12T16:29:03,355 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=128, ppid=127, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=7f9a08e11e132c4f4473bdc5fef699f6, ASSIGN; state=OFFLINE, location=4f6a4780a2f6,41933,1734020809476; forceNewPlan=false, retain=false 2024-12-12T16:29:03,506 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=128 updating hbase:meta row=7f9a08e11e132c4f4473bdc5fef699f6, regionState=OPENING, regionLocation=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:03,507 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=129, ppid=128, state=RUNNABLE; OpenRegionProcedure 7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476}] 2024-12-12T16:29:03,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=127 2024-12-12T16:29:03,659 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:03,661 INFO [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 
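[editor's note] While CreateTableProcedure runs, the client repeatedly asks the master "Checking to see if procedure is done pid=127" until the table future completes. A synchronous Admin.createTable() already does this waiting internally; the sketch below is only a hedged illustration of how application code could poll for availability itself, using an arbitrary timeout that is not taken from the log.

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public class WaitForTable {
  // Polls until the new table's regions are assigned, mirroring the
  // "Checking to see if procedure is done" loop in the log above.
  static void awaitAvailable(Admin admin, TableName table, long timeoutMs) throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (!admin.isTableAvailable(table)) {
      if (System.currentTimeMillis() > deadline) {
        throw new IllegalStateException("Table " + table + " not available in time");
      }
      TimeUnit.MILLISECONDS.sleep(100);
    }
  }
}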
2024-12-12T16:29:03,661 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(7285): Opening region: {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} 2024-12-12T16:29:03,661 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 7f9a08e11e132c4f4473bdc5fef699f6 2024-12-12T16:29:03,662 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T16:29:03,662 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(7327): checking encryption for 7f9a08e11e132c4f4473bdc5fef699f6 2024-12-12T16:29:03,662 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(7330): checking classloading for 7f9a08e11e132c4f4473bdc5fef699f6 2024-12-12T16:29:03,663 INFO [StoreOpener-7f9a08e11e132c4f4473bdc5fef699f6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 7f9a08e11e132c4f4473bdc5fef699f6 2024-12-12T16:29:03,664 INFO [StoreOpener-7f9a08e11e132c4f4473bdc5fef699f6-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T16:29:03,664 INFO [StoreOpener-7f9a08e11e132c4f4473bdc5fef699f6-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7f9a08e11e132c4f4473bdc5fef699f6 columnFamilyName A 2024-12-12T16:29:03,664 DEBUG [StoreOpener-7f9a08e11e132c4f4473bdc5fef699f6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:29:03,665 INFO [StoreOpener-7f9a08e11e132c4f4473bdc5fef699f6-1 {}] regionserver.HStore(327): Store=7f9a08e11e132c4f4473bdc5fef699f6/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T16:29:03,665 INFO [StoreOpener-7f9a08e11e132c4f4473bdc5fef699f6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 7f9a08e11e132c4f4473bdc5fef699f6 2024-12-12T16:29:03,665 INFO [StoreOpener-7f9a08e11e132c4f4473bdc5fef699f6-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T16:29:03,666 INFO [StoreOpener-7f9a08e11e132c4f4473bdc5fef699f6-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7f9a08e11e132c4f4473bdc5fef699f6 columnFamilyName B 2024-12-12T16:29:03,666 DEBUG [StoreOpener-7f9a08e11e132c4f4473bdc5fef699f6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:29:03,666 INFO [StoreOpener-7f9a08e11e132c4f4473bdc5fef699f6-1 {}] regionserver.HStore(327): Store=7f9a08e11e132c4f4473bdc5fef699f6/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T16:29:03,666 INFO [StoreOpener-7f9a08e11e132c4f4473bdc5fef699f6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 7f9a08e11e132c4f4473bdc5fef699f6 2024-12-12T16:29:03,667 INFO [StoreOpener-7f9a08e11e132c4f4473bdc5fef699f6-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T16:29:03,667 INFO [StoreOpener-7f9a08e11e132c4f4473bdc5fef699f6-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7f9a08e11e132c4f4473bdc5fef699f6 columnFamilyName C 2024-12-12T16:29:03,667 DEBUG [StoreOpener-7f9a08e11e132c4f4473bdc5fef699f6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:29:03,667 INFO [StoreOpener-7f9a08e11e132c4f4473bdc5fef699f6-1 {}] regionserver.HStore(327): Store=7f9a08e11e132c4f4473bdc5fef699f6/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T16:29:03,668 INFO [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:03,668 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6 2024-12-12T16:29:03,668 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6 2024-12-12T16:29:03,669 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-12T16:29:03,670 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(1085): writing seq id for 7f9a08e11e132c4f4473bdc5fef699f6 2024-12-12T16:29:03,672 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T16:29:03,673 INFO [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(1102): Opened 7f9a08e11e132c4f4473bdc5fef699f6; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65448819, jitterRate=-0.024736598134040833}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-12T16:29:03,673 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(1001): Region open journal for 7f9a08e11e132c4f4473bdc5fef699f6: 2024-12-12T16:29:03,674 INFO [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6., pid=129, masterSystemTime=1734020943658 2024-12-12T16:29:03,675 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:03,675 INFO [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 
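[editor's note] The CompactionConfiguration lines above report, for each family, minFilesToCompact:3, maxFilesToCompact:10 and ratio 1.200000. A hedged sketch of the standard configuration keys those values come from; the numbers shown are the shipped defaults echoed in the log, and site files or table/family descriptors may override them in a real deployment.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuning {
  static Configuration compactionDefaults() {
    Configuration conf = HBaseConfiguration.create();
    // Values mirror the CompactionConfiguration lines above.
    conf.setInt("hbase.hstore.compaction.min", 3);      // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);     // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    return conf;
  }
}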
2024-12-12T16:29:03,676 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=128 updating hbase:meta row=7f9a08e11e132c4f4473bdc5fef699f6, regionState=OPEN, openSeqNum=2, regionLocation=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:03,678 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=129, resume processing ppid=128 2024-12-12T16:29:03,678 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, ppid=128, state=SUCCESS; OpenRegionProcedure 7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 in 170 msec 2024-12-12T16:29:03,679 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=128, resume processing ppid=127 2024-12-12T16:29:03,679 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, ppid=127, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=7f9a08e11e132c4f4473bdc5fef699f6, ASSIGN in 324 msec 2024-12-12T16:29:03,680 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T16:29:03,680 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734020943680"}]},"ts":"1734020943680"} 2024-12-12T16:29:03,681 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-12T16:29:03,687 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T16:29:03,688 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1550 sec 2024-12-12T16:29:04,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=127 2024-12-12T16:29:04,638 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 127 completed 2024-12-12T16:29:04,639 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7177efc9 to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@65df2359 2024-12-12T16:29:04,642 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ef40578, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:29:04,644 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:29:04,645 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41922, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:29:04,646 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-12T16:29:04,647 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40054, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-12T16:29:04,648 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x61d38088 to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7d0ab200 2024-12-12T16:29:04,652 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32bb71c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:29:04,653 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7043f683 to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5871c039 2024-12-12T16:29:04,655 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6bc0f7c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:29:04,656 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2b0c2472 to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7daa5922 2024-12-12T16:29:04,658 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b8b6e04, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:29:04,659 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x34b30c39 to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1b7f20c4 2024-12-12T16:29:04,661 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5bc486e1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:29:04,662 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3d672ed2 to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5f7c40ba 2024-12-12T16:29:04,664 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2070263a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:29:04,665 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x41b0e7b6 to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6050584c 2024-12-12T16:29:04,667 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@154f0f85, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:29:04,668 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0f2423f3 to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6dd48863 2024-12-12T16:29:04,671 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8a917b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:29:04,672 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x184771cf to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@51196534 2024-12-12T16:29:04,675 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54c2725, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:29:04,675 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x076f0408 to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1dc5e114 2024-12-12T16:29:04,678 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79d49886, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:29:04,679 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0c692575 to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3e96b8ad 2024-12-12T16:29:04,682 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@635b1751, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:29:04,688 DEBUG [hconnection-0x6cb11359-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:29:04,689 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41928, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:29:04,690 DEBUG [hconnection-0x7f92839c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:29:04,690 DEBUG [hconnection-0x8608415-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:29:04,691 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41940, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:29:04,691 INFO [RS-EventLoopGroup-3-2 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41942, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:29:04,693 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T16:29:04,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees 2024-12-12T16:29:04,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-12T16:29:04,694 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T16:29:04,695 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T16:29:04,695 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=131, ppid=130, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T16:29:04,700 DEBUG [hconnection-0x565a2b34-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:29:04,701 DEBUG [hconnection-0x752a7e4e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:29:04,702 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41956, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:29:04,702 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41966, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:29:04,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 7f9a08e11e132c4f4473bdc5fef699f6 2024-12-12T16:29:04,706 DEBUG [hconnection-0x181b269b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:29:04,706 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7f9a08e11e132c4f4473bdc5fef699f6 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T16:29:04,706 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=A 2024-12-12T16:29:04,707 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:04,707 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=B 2024-12-12T16:29:04,707 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:04,707 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41980, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins 
(auth:SIMPLE), service=ClientService 2024-12-12T16:29:04,707 DEBUG [hconnection-0x1b6bf802-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:29:04,707 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=C 2024-12-12T16:29:04,707 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:04,707 DEBUG [hconnection-0x59b0813c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:29:04,708 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41986, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:29:04,708 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42000, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:29:04,713 DEBUG [hconnection-0x2b5e90b2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:29:04,714 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42016, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:29:04,717 DEBUG [hconnection-0x88c0c23-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:29:04,719 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42018, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:29:04,730 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:04,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021004724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:04,731 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:04,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 3 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42016 deadline: 1734021004729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:04,732 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:04,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41966 deadline: 1734021004730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:04,732 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:04,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41956 deadline: 1734021004731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:04,733 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:04,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41942 deadline: 1734021004732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:04,737 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/ab06a529d7184b7fb272395deced6312 is 50, key is test_row_0/A:col10/1734020944703/Put/seqid=0 2024-12-12T16:29:04,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742275_1451 (size=12001) 2024-12-12T16:29:04,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-12T16:29:04,835 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:04,835 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:04,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42016 deadline: 1734021004833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:04,836 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:04,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41966 deadline: 1734021004833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:04,836 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:04,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41956 deadline: 1734021004833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:04,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021004832, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:04,836 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:04,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41942 deadline: 1734021004833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:04,846 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:04,847 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-12T16:29:04,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:04,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. as already flushing 2024-12-12T16:29:04,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:04,847 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:04,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:04,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:04,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-12T16:29:04,999 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:05,000 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-12T16:29:05,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:05,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. as already flushing 2024-12-12T16:29:05,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:05,000 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:05,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:05,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:05,041 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:05,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41956 deadline: 1734021005037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:05,041 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:05,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41966 deadline: 1734021005038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:05,042 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:05,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41942 deadline: 1734021005038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:05,042 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:05,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021005039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:05,052 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:05,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42016 deadline: 1734021005048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:05,152 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:05,152 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-12T16:29:05,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:05,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. as already flushing 2024-12-12T16:29:05,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:05,153 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:05,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:05,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:05,156 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/ab06a529d7184b7fb272395deced6312 2024-12-12T16:29:05,177 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/3c3238ac142f4a00807830f5fed10cb1 is 50, key is test_row_0/B:col10/1734020944703/Put/seqid=0 2024-12-12T16:29:05,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742276_1452 (size=12001) 2024-12-12T16:29:05,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-12T16:29:05,305 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:05,305 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-12T16:29:05,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:05,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 
as already flushing 2024-12-12T16:29:05,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:05,306 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:05,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:05,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:05,345 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:05,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41966 deadline: 1734021005342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:05,346 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:05,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41956 deadline: 1734021005344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:05,347 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:05,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021005344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:05,347 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:05,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41942 deadline: 1734021005344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:05,355 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:05,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42016 deadline: 1734021005353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:05,459 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:05,459 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-12T16:29:05,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:05,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. as already flushing 2024-12-12T16:29:05,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:05,459 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:05,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:05,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:05,582 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/3c3238ac142f4a00807830f5fed10cb1 2024-12-12T16:29:05,606 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/6a6bebf0bacb472a93e772b4e2a0c190 is 50, key is test_row_0/C:col10/1734020944703/Put/seqid=0 2024-12-12T16:29:05,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742277_1453 (size=12001) 2024-12-12T16:29:05,611 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:05,612 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-12T16:29:05,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:05,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 
as already flushing 2024-12-12T16:29:05,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:05,612 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:05,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:05,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:05,764 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:05,765 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-12T16:29:05,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:05,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. as already flushing 2024-12-12T16:29:05,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:05,765 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:05,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:05,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:05,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-12T16:29:05,851 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:05,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41942 deadline: 1734021005848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:05,851 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:05,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41966 deadline: 1734021005849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:05,854 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:05,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021005852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:05,854 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:05,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41956 deadline: 1734021005852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:05,859 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:05,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42016 deadline: 1734021005857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:05,917 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:05,918 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-12T16:29:05,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:05,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. as already flushing 2024-12-12T16:29:05,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:05,918 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:05,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:05,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:06,012 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/6a6bebf0bacb472a93e772b4e2a0c190 2024-12-12T16:29:06,016 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/ab06a529d7184b7fb272395deced6312 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/ab06a529d7184b7fb272395deced6312 2024-12-12T16:29:06,020 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/ab06a529d7184b7fb272395deced6312, entries=150, sequenceid=14, filesize=11.7 K 2024-12-12T16:29:06,024 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/3c3238ac142f4a00807830f5fed10cb1 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/3c3238ac142f4a00807830f5fed10cb1 2024-12-12T16:29:06,027 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/3c3238ac142f4a00807830f5fed10cb1, entries=150, sequenceid=14, 
filesize=11.7 K 2024-12-12T16:29:06,028 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/6a6bebf0bacb472a93e772b4e2a0c190 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/6a6bebf0bacb472a93e772b4e2a0c190 2024-12-12T16:29:06,031 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/6a6bebf0bacb472a93e772b4e2a0c190, entries=150, sequenceid=14, filesize=11.7 K 2024-12-12T16:29:06,032 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 7f9a08e11e132c4f4473bdc5fef699f6 in 1326ms, sequenceid=14, compaction requested=false 2024-12-12T16:29:06,032 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7f9a08e11e132c4f4473bdc5fef699f6: 2024-12-12T16:29:06,070 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:06,070 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-12T16:29:06,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 
2024-12-12T16:29:06,071 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2837): Flushing 7f9a08e11e132c4f4473bdc5fef699f6 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-12T16:29:06,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=A 2024-12-12T16:29:06,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:06,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=B 2024-12-12T16:29:06,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:06,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=C 2024-12-12T16:29:06,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:06,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/502b25779e9941bf8fd4250be71274d2 is 50, key is test_row_0/A:col10/1734020944728/Put/seqid=0 2024-12-12T16:29:06,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742278_1454 (size=12001) 2024-12-12T16:29:06,481 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/502b25779e9941bf8fd4250be71274d2 2024-12-12T16:29:06,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/20b7db9ddb8c4b118222dd7c9ee922c6 is 50, key is test_row_0/B:col10/1734020944728/Put/seqid=0 2024-12-12T16:29:06,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742279_1455 (size=12001) 2024-12-12T16:29:06,520 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=38 (bloomFilter=true), 
to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/20b7db9ddb8c4b118222dd7c9ee922c6 2024-12-12T16:29:06,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/30c5d557a5da49bd81bf329904cec3e5 is 50, key is test_row_0/C:col10/1734020944728/Put/seqid=0 2024-12-12T16:29:06,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742280_1456 (size=12001) 2024-12-12T16:29:06,550 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/30c5d557a5da49bd81bf329904cec3e5 2024-12-12T16:29:06,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/502b25779e9941bf8fd4250be71274d2 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/502b25779e9941bf8fd4250be71274d2 2024-12-12T16:29:06,559 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/502b25779e9941bf8fd4250be71274d2, entries=150, sequenceid=38, filesize=11.7 K 2024-12-12T16:29:06,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/20b7db9ddb8c4b118222dd7c9ee922c6 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/20b7db9ddb8c4b118222dd7c9ee922c6 2024-12-12T16:29:06,565 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/20b7db9ddb8c4b118222dd7c9ee922c6, entries=150, sequenceid=38, filesize=11.7 K 2024-12-12T16:29:06,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/30c5d557a5da49bd81bf329904cec3e5 as 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/30c5d557a5da49bd81bf329904cec3e5 2024-12-12T16:29:06,570 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/30c5d557a5da49bd81bf329904cec3e5, entries=150, sequenceid=38, filesize=11.7 K 2024-12-12T16:29:06,571 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=0 B/0 for 7f9a08e11e132c4f4473bdc5fef699f6 in 499ms, sequenceid=38, compaction requested=false 2024-12-12T16:29:06,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2538): Flush status journal for 7f9a08e11e132c4f4473bdc5fef699f6: 2024-12-12T16:29:06,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:06,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=131 2024-12-12T16:29:06,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4106): Remote procedure done, pid=131 2024-12-12T16:29:06,573 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=131, resume processing ppid=130 2024-12-12T16:29:06,573 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, ppid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8770 sec 2024-12-12T16:29:06,575 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees in 1.8810 sec 2024-12-12T16:29:06,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-12T16:29:06,798 INFO [Thread-2023 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 130 completed 2024-12-12T16:29:06,800 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T16:29:06,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees 2024-12-12T16:29:06,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-12T16:29:06,801 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T16:29:06,802 INFO [PEWorker-5 {}] 
procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T16:29:06,802 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T16:29:06,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 7f9a08e11e132c4f4473bdc5fef699f6 2024-12-12T16:29:06,868 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7f9a08e11e132c4f4473bdc5fef699f6 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T16:29:06,868 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=A 2024-12-12T16:29:06,868 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:06,868 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=B 2024-12-12T16:29:06,868 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:06,868 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=C 2024-12-12T16:29:06,868 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:06,873 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/3c772500aea64e599ba935e595efb48e is 50, key is test_row_0/A:col10/1734020946862/Put/seqid=0 2024-12-12T16:29:06,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742281_1457 (size=14341) 2024-12-12T16:29:06,878 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/3c772500aea64e599ba935e595efb48e 2024-12-12T16:29:06,886 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/5985ec53a08a4ab8a77102c387654268 is 50, key is test_row_0/B:col10/1734020946862/Put/seqid=0 2024-12-12T16:29:06,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742282_1458 (size=12001) 2024-12-12T16:29:06,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-12T16:29:06,907 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:06,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41942 deadline: 1734021006898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:06,908 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:06,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41966 deadline: 1734021006899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:06,912 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:06,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021006905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:06,916 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:06,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41956 deadline: 1734021006907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:06,916 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:06,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42016 deadline: 1734021006908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:06,953 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:06,954 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-12T16:29:06,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:06,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. as already flushing 2024-12-12T16:29:06,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:06,954 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:29:06,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:06,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:07,012 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:07,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41942 deadline: 1734021007009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:07,012 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:07,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41966 deadline: 1734021007009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:07,019 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:07,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021007013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:07,021 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:07,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41956 deadline: 1734021007017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:07,022 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:07,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42016 deadline: 1734021007017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:07,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-12T16:29:07,107 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:07,107 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-12T16:29:07,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:07,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. as already flushing 2024-12-12T16:29:07,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:07,107 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:29:07,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:07,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:07,217 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:07,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41942 deadline: 1734021007214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:07,218 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:07,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41966 deadline: 1734021007214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:07,226 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:07,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021007221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:07,226 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:07,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41956 deadline: 1734021007223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:07,226 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:07,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42016 deadline: 1734021007223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:07,259 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:07,260 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-12T16:29:07,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:07,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. as already flushing 2024-12-12T16:29:07,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:07,260 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:29:07,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:07,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:07,292 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/5985ec53a08a4ab8a77102c387654268 2024-12-12T16:29:07,300 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/fbf2fef6bcb64c6096226e45cfbb7c32 is 50, key is test_row_0/C:col10/1734020946862/Put/seqid=0 2024-12-12T16:29:07,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742283_1459 (size=12001) 2024-12-12T16:29:07,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-12T16:29:07,412 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:07,413 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-12T16:29:07,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:07,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. as already flushing 2024-12-12T16:29:07,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:07,413 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:07,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:07,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:07,524 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:07,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41966 deadline: 1734021007520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:07,526 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:07,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41942 deadline: 1734021007521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:07,533 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:07,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021007527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:07,534 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:07,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41956 deadline: 1734021007527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:07,534 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:07,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42016 deadline: 1734021007529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:07,566 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:07,566 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-12T16:29:07,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:07,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. as already flushing 2024-12-12T16:29:07,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:07,566 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:29:07,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:07,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:07,652 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-12T16:29:07,705 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/fbf2fef6bcb64c6096226e45cfbb7c32 2024-12-12T16:29:07,709 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/3c772500aea64e599ba935e595efb48e as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/3c772500aea64e599ba935e595efb48e 2024-12-12T16:29:07,713 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/3c772500aea64e599ba935e595efb48e, entries=200, sequenceid=49, filesize=14.0 K 2024-12-12T16:29:07,713 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/5985ec53a08a4ab8a77102c387654268 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/5985ec53a08a4ab8a77102c387654268 2024-12-12T16:29:07,716 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/5985ec53a08a4ab8a77102c387654268, entries=150, sequenceid=49, filesize=11.7 K 2024-12-12T16:29:07,717 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/fbf2fef6bcb64c6096226e45cfbb7c32 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/fbf2fef6bcb64c6096226e45cfbb7c32 2024-12-12T16:29:07,719 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:07,719 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-12T16:29:07,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 
2024-12-12T16:29:07,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. as already flushing 2024-12-12T16:29:07,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:07,719 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:07,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:29:07,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
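The pid=133 failure above is expected back-and-forth rather than a real error: the remote FlushRegionCallable found the region already flushing (the MemStoreFlusher.0 flush that started earlier), reported the IOException back to the master, and the master simply re-dispatches the callable shortly afterwards (the retry at 16:29:07,872 below succeeds). For reference, a minimal sketch of how such a table flush is requested through the public client API; this is illustrative only, assumes a standard HBase 2.x client on the classpath, and is not code taken from the test itself:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Requests a flush of every region of the table; in this build the master
      // runs it as a FlushTableProcedure with one FlushRegionProcedure per region,
      // which is the pid=132/pid=133 pair visible in the log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}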
2024-12-12T16:29:07,723 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/fbf2fef6bcb64c6096226e45cfbb7c32, entries=150, sequenceid=49, filesize=11.7 K 2024-12-12T16:29:07,724 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 7f9a08e11e132c4f4473bdc5fef699f6 in 857ms, sequenceid=49, compaction requested=true 2024-12-12T16:29:07,724 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7f9a08e11e132c4f4473bdc5fef699f6: 2024-12-12T16:29:07,724 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7f9a08e11e132c4f4473bdc5fef699f6:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T16:29:07,724 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:29:07,724 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:07,724 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:29:07,724 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7f9a08e11e132c4f4473bdc5fef699f6:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T16:29:07,724 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:07,724 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7f9a08e11e132c4f4473bdc5fef699f6:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T16:29:07,724 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:29:07,730 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38343 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:29:07,730 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:29:07,730 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): 7f9a08e11e132c4f4473bdc5fef699f6/A is initiating minor compaction (all files) 2024-12-12T16:29:07,730 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): 7f9a08e11e132c4f4473bdc5fef699f6/B is initiating minor compaction (all files) 2024-12-12T16:29:07,730 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7f9a08e11e132c4f4473bdc5fef699f6/A in TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 
2024-12-12T16:29:07,730 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7f9a08e11e132c4f4473bdc5fef699f6/B in TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:07,730 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/3c3238ac142f4a00807830f5fed10cb1, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/20b7db9ddb8c4b118222dd7c9ee922c6, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/5985ec53a08a4ab8a77102c387654268] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp, totalSize=35.2 K 2024-12-12T16:29:07,730 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/ab06a529d7184b7fb272395deced6312, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/502b25779e9941bf8fd4250be71274d2, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/3c772500aea64e599ba935e595efb48e] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp, totalSize=37.4 K 2024-12-12T16:29:07,731 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 3c3238ac142f4a00807830f5fed10cb1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1734020944703 2024-12-12T16:29:07,731 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting ab06a529d7184b7fb272395deced6312, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1734020944703 2024-12-12T16:29:07,731 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 20b7db9ddb8c4b118222dd7c9ee922c6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1734020944722 2024-12-12T16:29:07,731 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 502b25779e9941bf8fd4250be71274d2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1734020944722 2024-12-12T16:29:07,731 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 5985ec53a08a4ab8a77102c387654268, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1734020946862 2024-12-12T16:29:07,732 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3c772500aea64e599ba935e595efb48e, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1734020946862 
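Both selections above come from ExploringCompactionPolicy: three eligible store files per family (seqNum 14, 38 and 49) were scored in a single permutation and will each be rewritten into one file per store. The thresholds behind that decision are plain configuration; the sketch below shows the usual knobs being set programmatically. The values are illustrative only, not the ones this test uses:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuning {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minimum number of eligible files before a minor compaction is considered;
    // the log shows selection firing as soon as 3 files were eligible.
    conf.setInt("hbase.hstore.compaction.min", 3);
    // Upper bound on how many files a single compaction may rewrite.
    conf.setInt("hbase.hstore.compaction.max", 10);
    // File-size ratio used when scoring candidate sets.
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    System.out.println("min files = " + conf.getInt("hbase.hstore.compaction.min", -1));
  }
}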
2024-12-12T16:29:07,738 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7f9a08e11e132c4f4473bdc5fef699f6#B#compaction#384 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:29:07,738 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7f9a08e11e132c4f4473bdc5fef699f6#A#compaction#385 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:29:07,738 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/2caed9e86cbb40e29e433674b92b52ff is 50, key is test_row_0/B:col10/1734020946862/Put/seqid=0 2024-12-12T16:29:07,738 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/8b9caeab00dd499b95da3fcd18b2d4dc is 50, key is test_row_0/A:col10/1734020946862/Put/seqid=0 2024-12-12T16:29:07,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742284_1460 (size=12104) 2024-12-12T16:29:07,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742285_1461 (size=12104) 2024-12-12T16:29:07,871 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:07,872 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-12T16:29:07,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 
2024-12-12T16:29:07,872 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2837): Flushing 7f9a08e11e132c4f4473bdc5fef699f6 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-12T16:29:07,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=A 2024-12-12T16:29:07,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:07,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=B 2024-12-12T16:29:07,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:07,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=C 2024-12-12T16:29:07,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:07,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/72a7ae4080f74d0c8e9da45836ceba36 is 50, key is test_row_0/A:col10/1734020946905/Put/seqid=0 2024-12-12T16:29:07,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742286_1462 (size=12001) 2024-12-12T16:29:07,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-12T16:29:08,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 7f9a08e11e132c4f4473bdc5fef699f6 2024-12-12T16:29:08,031 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. as already flushing 2024-12-12T16:29:08,045 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:08,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42016 deadline: 1734021008038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:08,048 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:08,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41966 deadline: 1734021008040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:08,049 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:08,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41942 deadline: 1734021008041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:08,050 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:08,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021008044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:08,051 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:08,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41956 deadline: 1734021008046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:08,150 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/2caed9e86cbb40e29e433674b92b52ff as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/2caed9e86cbb40e29e433674b92b52ff 2024-12-12T16:29:08,154 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:08,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41966 deadline: 1734021008149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:08,155 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:08,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41942 deadline: 1734021008150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:08,155 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7f9a08e11e132c4f4473bdc5fef699f6/B of 7f9a08e11e132c4f4473bdc5fef699f6 into 2caed9e86cbb40e29e433674b92b52ff(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
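The RegionTooBusyException warnings here (and continuing below) are write back-pressure, not flush failures: HRegion.checkResources rejects new mutations once the region's memstore exceeds its blocking limit, reported as 512.0 K, a test-scale value (the limit is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier). The stock client treats this as retriable, so writers normally just see higher latency; the sketch below makes the back-off explicit. It is illustrative only, and the table, family and row names are placeholders:

import java.io.IOException;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPut {
  // Retries a single Put a few times when the region reports memstore back-pressure.
  static void putWithBackoff(Connection conn, Put put) throws IOException, InterruptedException {
    try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);
          return;
        } catch (RegionTooBusyException e) {
          // Memstore over its blocking limit; give the in-flight flush time to finish.
          Thread.sleep(100L * (attempt + 1));
        }
      }
      throw new IOException("region stayed too busy after retries");
    }
  }

  static Put examplePut() {
    return new Put(Bytes.toBytes("test_row_0"))
        .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
  }
}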
2024-12-12T16:29:08,155 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7f9a08e11e132c4f4473bdc5fef699f6: 2024-12-12T16:29:08,155 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6., storeName=7f9a08e11e132c4f4473bdc5fef699f6/B, priority=13, startTime=1734020947724; duration=0sec 2024-12-12T16:29:08,155 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:08,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41956 deadline: 1734021008152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:08,155 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:29:08,155 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7f9a08e11e132c4f4473bdc5fef699f6:B 2024-12-12T16:29:08,155 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:29:08,155 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:08,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021008152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:08,157 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:29:08,157 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): 7f9a08e11e132c4f4473bdc5fef699f6/C is initiating minor compaction (all files) 2024-12-12T16:29:08,158 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7f9a08e11e132c4f4473bdc5fef699f6/C in TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 
2024-12-12T16:29:08,158 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/6a6bebf0bacb472a93e772b4e2a0c190, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/30c5d557a5da49bd81bf329904cec3e5, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/fbf2fef6bcb64c6096226e45cfbb7c32] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp, totalSize=35.2 K 2024-12-12T16:29:08,158 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 6a6bebf0bacb472a93e772b4e2a0c190, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1734020944703 2024-12-12T16:29:08,159 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 30c5d557a5da49bd81bf329904cec3e5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1734020944722 2024-12-12T16:29:08,159 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting fbf2fef6bcb64c6096226e45cfbb7c32, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1734020946862 2024-12-12T16:29:08,160 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/8b9caeab00dd499b95da3fcd18b2d4dc as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/8b9caeab00dd499b95da3fcd18b2d4dc 2024-12-12T16:29:08,165 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7f9a08e11e132c4f4473bdc5fef699f6/A of 7f9a08e11e132c4f4473bdc5fef699f6 into 8b9caeab00dd499b95da3fcd18b2d4dc(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T16:29:08,165 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7f9a08e11e132c4f4473bdc5fef699f6: 2024-12-12T16:29:08,165 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6., storeName=7f9a08e11e132c4f4473bdc5fef699f6/A, priority=13, startTime=1734020947724; duration=0sec 2024-12-12T16:29:08,165 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:08,165 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7f9a08e11e132c4f4473bdc5fef699f6:A 2024-12-12T16:29:08,181 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7f9a08e11e132c4f4473bdc5fef699f6#C#compaction#387 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:29:08,182 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/d9fb3df42f254ffba9bed1983a630de8 is 50, key is test_row_0/C:col10/1734020946862/Put/seqid=0 2024-12-12T16:29:08,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742287_1463 (size=12104) 2024-12-12T16:29:08,204 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/d9fb3df42f254ffba9bed1983a630de8 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/d9fb3df42f254ffba9bed1983a630de8 2024-12-12T16:29:08,210 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7f9a08e11e132c4f4473bdc5fef699f6/C of 7f9a08e11e132c4f4473bdc5fef699f6 into d9fb3df42f254ffba9bed1983a630de8(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
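The throughput-controller lines show compaction I/O being throttled: each compaction reports its average rate (about 6.55 MB/s here) against a total limit of 50.00 MB/s that PressureAwareThroughputController adjusts with flush pressure. The bounds are configurable; the key names below are the ones commonly documented for the pressure-aware compaction controller and should be treated as an assumption to verify against the running HBase version, with illustrative values:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThroughputBounds {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed key names (verify for your version); bounds are in bytes per second.
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    System.out.println(conf.get("hbase.hstore.compaction.throughput.lower.bound"));
  }
}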
2024-12-12T16:29:08,210 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7f9a08e11e132c4f4473bdc5fef699f6: 2024-12-12T16:29:08,210 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6., storeName=7f9a08e11e132c4f4473bdc5fef699f6/C, priority=13, startTime=1734020947724; duration=0sec 2024-12-12T16:29:08,210 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:08,211 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7f9a08e11e132c4f4473bdc5fef699f6:C 2024-12-12T16:29:08,290 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/72a7ae4080f74d0c8e9da45836ceba36 2024-12-12T16:29:08,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/a3ccb499f99d4ff593e8703f106faaf9 is 50, key is test_row_0/B:col10/1734020946905/Put/seqid=0 2024-12-12T16:29:08,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742288_1464 (size=12001) 2024-12-12T16:29:08,356 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:08,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41942 deadline: 1734021008356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:08,357 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:08,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41966 deadline: 1734021008357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:08,357 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:08,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021008357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:08,358 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:08,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41956 deadline: 1734021008358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:08,660 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:08,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41942 deadline: 1734021008658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:08,663 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:08,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41956 deadline: 1734021008659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:08,663 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:08,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41966 deadline: 1734021008659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:08,664 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:08,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021008660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:08,714 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/a3ccb499f99d4ff593e8703f106faaf9 2024-12-12T16:29:08,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/f59f7f5bf6274d7487a5c62d1e52f021 is 50, key is test_row_0/C:col10/1734020946905/Put/seqid=0 2024-12-12T16:29:08,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742289_1465 (size=12001) 2024-12-12T16:29:08,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-12T16:29:09,056 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:09,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42016 deadline: 1734021009052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:09,126 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/f59f7f5bf6274d7487a5c62d1e52f021 2024-12-12T16:29:09,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/72a7ae4080f74d0c8e9da45836ceba36 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/72a7ae4080f74d0c8e9da45836ceba36 2024-12-12T16:29:09,133 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/72a7ae4080f74d0c8e9da45836ceba36, entries=150, sequenceid=75, filesize=11.7 K 2024-12-12T16:29:09,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/a3ccb499f99d4ff593e8703f106faaf9 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/a3ccb499f99d4ff593e8703f106faaf9 2024-12-12T16:29:09,143 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/a3ccb499f99d4ff593e8703f106faaf9, entries=150, sequenceid=75, filesize=11.7 K 2024-12-12T16:29:09,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/f59f7f5bf6274d7487a5c62d1e52f021 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/f59f7f5bf6274d7487a5c62d1e52f021 2024-12-12T16:29:09,147 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/f59f7f5bf6274d7487a5c62d1e52f021, entries=150, sequenceid=75, filesize=11.7 K 2024-12-12T16:29:09,148 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 7f9a08e11e132c4f4473bdc5fef699f6 in 1276ms, sequenceid=75, compaction requested=false 2024-12-12T16:29:09,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2538): Flush status journal for 7f9a08e11e132c4f4473bdc5fef699f6: 2024-12-12T16:29:09,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:09,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=133 2024-12-12T16:29:09,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4106): Remote procedure done, pid=133 2024-12-12T16:29:09,151 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=133, resume processing ppid=132 2024-12-12T16:29:09,151 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, ppid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3480 sec 2024-12-12T16:29:09,152 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees in 2.3510 sec 2024-12-12T16:29:09,167 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7f9a08e11e132c4f4473bdc5fef699f6 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T16:29:09,167 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=A 2024-12-12T16:29:09,167 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:09,167 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=B 2024-12-12T16:29:09,167 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:09,167 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=C 2024-12-12T16:29:09,167 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-12-12T16:29:09,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 7f9a08e11e132c4f4473bdc5fef699f6 2024-12-12T16:29:09,182 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/fc33bc7e9a1846cf9fc33fe23904591e is 50, key is test_row_0/A:col10/1734020949165/Put/seqid=0 2024-12-12T16:29:09,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742290_1466 (size=14341) 2024-12-12T16:29:09,191 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/fc33bc7e9a1846cf9fc33fe23904591e 2024-12-12T16:29:09,197 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/d2ddbc536cc44a16b07bf0ec7657adee is 50, key is test_row_0/B:col10/1734020949165/Put/seqid=0 2024-12-12T16:29:09,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742291_1467 (size=12001) 2024-12-12T16:29:09,213 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:09,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41966 deadline: 1734021009205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:09,220 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:09,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021009212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:09,220 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:09,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41956 deadline: 1734021009213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:09,220 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:09,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41942 deadline: 1734021009214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:09,317 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:09,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41966 deadline: 1734021009315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:09,322 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:09,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021009321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:09,322 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:09,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41956 deadline: 1734021009321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:09,322 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:09,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41942 deadline: 1734021009321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:09,519 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:09,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41966 deadline: 1734021009519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:09,526 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:09,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021009524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:09,527 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:09,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41956 deadline: 1734021009524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:09,528 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:09,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41942 deadline: 1734021009525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:09,614 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/d2ddbc536cc44a16b07bf0ec7657adee 2024-12-12T16:29:09,625 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/e7df1806ade34566ad59d7612d32e7db is 50, key is test_row_0/C:col10/1734020949165/Put/seqid=0 2024-12-12T16:29:09,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742292_1468 (size=12001) 2024-12-12T16:29:09,635 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/e7df1806ade34566ad59d7612d32e7db 2024-12-12T16:29:09,639 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/fc33bc7e9a1846cf9fc33fe23904591e as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/fc33bc7e9a1846cf9fc33fe23904591e 2024-12-12T16:29:09,643 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/fc33bc7e9a1846cf9fc33fe23904591e, entries=200, sequenceid=89, filesize=14.0 K 2024-12-12T16:29:09,643 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/d2ddbc536cc44a16b07bf0ec7657adee as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/d2ddbc536cc44a16b07bf0ec7657adee 2024-12-12T16:29:09,651 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/d2ddbc536cc44a16b07bf0ec7657adee, entries=150, sequenceid=89, filesize=11.7 K 2024-12-12T16:29:09,652 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/e7df1806ade34566ad59d7612d32e7db as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/e7df1806ade34566ad59d7612d32e7db 2024-12-12T16:29:09,656 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/e7df1806ade34566ad59d7612d32e7db, entries=150, sequenceid=89, filesize=11.7 K 2024-12-12T16:29:09,658 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 7f9a08e11e132c4f4473bdc5fef699f6 in 491ms, sequenceid=89, compaction requested=true 2024-12-12T16:29:09,658 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7f9a08e11e132c4f4473bdc5fef699f6: 2024-12-12T16:29:09,658 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7f9a08e11e132c4f4473bdc5fef699f6:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T16:29:09,658 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:09,658 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:29:09,658 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:29:09,658 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7f9a08e11e132c4f4473bdc5fef699f6:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T16:29:09,658 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:09,658 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7f9a08e11e132c4f4473bdc5fef699f6:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T16:29:09,658 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:29:09,659 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:29:09,659 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): 
7f9a08e11e132c4f4473bdc5fef699f6/B is initiating minor compaction (all files) 2024-12-12T16:29:09,659 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7f9a08e11e132c4f4473bdc5fef699f6/B in TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:09,659 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/2caed9e86cbb40e29e433674b92b52ff, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/a3ccb499f99d4ff593e8703f106faaf9, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/d2ddbc536cc44a16b07bf0ec7657adee] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp, totalSize=35.3 K 2024-12-12T16:29:09,660 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38446 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:29:09,660 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): 7f9a08e11e132c4f4473bdc5fef699f6/A is initiating minor compaction (all files) 2024-12-12T16:29:09,660 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7f9a08e11e132c4f4473bdc5fef699f6/A in TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 
2024-12-12T16:29:09,660 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/8b9caeab00dd499b95da3fcd18b2d4dc, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/72a7ae4080f74d0c8e9da45836ceba36, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/fc33bc7e9a1846cf9fc33fe23904591e] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp, totalSize=37.5 K 2024-12-12T16:29:09,660 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8b9caeab00dd499b95da3fcd18b2d4dc, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1734020946862 2024-12-12T16:29:09,660 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 2caed9e86cbb40e29e433674b92b52ff, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1734020946862 2024-12-12T16:29:09,661 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting a3ccb499f99d4ff593e8703f106faaf9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1734020946897 2024-12-12T16:29:09,661 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 72a7ae4080f74d0c8e9da45836ceba36, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1734020946897 2024-12-12T16:29:09,661 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting d2ddbc536cc44a16b07bf0ec7657adee, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1734020948039 2024-12-12T16:29:09,661 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting fc33bc7e9a1846cf9fc33fe23904591e, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1734020948039 2024-12-12T16:29:09,678 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7f9a08e11e132c4f4473bdc5fef699f6#B#compaction#393 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:29:09,678 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/f9f34da35d82493fb664ff0ab6ea4dae is 50, key is test_row_0/B:col10/1734020949165/Put/seqid=0 2024-12-12T16:29:09,686 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7f9a08e11e132c4f4473bdc5fef699f6#A#compaction#394 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:29:09,687 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/194ebda3369f48f690b5cbc14111f812 is 50, key is test_row_0/A:col10/1734020949165/Put/seqid=0 2024-12-12T16:29:09,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742293_1469 (size=12207) 2024-12-12T16:29:09,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742294_1470 (size=12207) 2024-12-12T16:29:09,709 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/194ebda3369f48f690b5cbc14111f812 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/194ebda3369f48f690b5cbc14111f812 2024-12-12T16:29:09,713 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7f9a08e11e132c4f4473bdc5fef699f6/A of 7f9a08e11e132c4f4473bdc5fef699f6 into 194ebda3369f48f690b5cbc14111f812(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:29:09,713 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7f9a08e11e132c4f4473bdc5fef699f6: 2024-12-12T16:29:09,713 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6., storeName=7f9a08e11e132c4f4473bdc5fef699f6/A, priority=13, startTime=1734020949658; duration=0sec 2024-12-12T16:29:09,713 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:29:09,713 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7f9a08e11e132c4f4473bdc5fef699f6:A 2024-12-12T16:29:09,713 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:29:09,714 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:29:09,714 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): 7f9a08e11e132c4f4473bdc5fef699f6/C is initiating minor compaction (all files) 2024-12-12T16:29:09,714 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7f9a08e11e132c4f4473bdc5fef699f6/C in TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 
2024-12-12T16:29:09,714 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/d9fb3df42f254ffba9bed1983a630de8, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/f59f7f5bf6274d7487a5c62d1e52f021, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/e7df1806ade34566ad59d7612d32e7db] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp, totalSize=35.3 K 2024-12-12T16:29:09,716 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting d9fb3df42f254ffba9bed1983a630de8, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1734020946862 2024-12-12T16:29:09,716 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting f59f7f5bf6274d7487a5c62d1e52f021, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1734020946897 2024-12-12T16:29:09,716 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting e7df1806ade34566ad59d7612d32e7db, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1734020948039 2024-12-12T16:29:09,726 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7f9a08e11e132c4f4473bdc5fef699f6#C#compaction#395 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:29:09,726 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/fd651e3b5e8e4452b62d0cdf21ffd2cd is 50, key is test_row_0/C:col10/1734020949165/Put/seqid=0 2024-12-12T16:29:09,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742295_1471 (size=12207) 2024-12-12T16:29:09,777 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/fd651e3b5e8e4452b62d0cdf21ffd2cd as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/fd651e3b5e8e4452b62d0cdf21ffd2cd 2024-12-12T16:29:09,783 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7f9a08e11e132c4f4473bdc5fef699f6/C of 7f9a08e11e132c4f4473bdc5fef699f6 into fd651e3b5e8e4452b62d0cdf21ffd2cd(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T16:29:09,783 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7f9a08e11e132c4f4473bdc5fef699f6: 2024-12-12T16:29:09,783 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6., storeName=7f9a08e11e132c4f4473bdc5fef699f6/C, priority=13, startTime=1734020949658; duration=0sec 2024-12-12T16:29:09,783 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:09,783 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7f9a08e11e132c4f4473bdc5fef699f6:C 2024-12-12T16:29:09,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 7f9a08e11e132c4f4473bdc5fef699f6 2024-12-12T16:29:09,825 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7f9a08e11e132c4f4473bdc5fef699f6 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-12T16:29:09,825 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=A 2024-12-12T16:29:09,825 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:09,825 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=B 2024-12-12T16:29:09,825 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:09,825 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=C 2024-12-12T16:29:09,825 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:09,830 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/e7361bd2a95448618843d871345d2518 is 50, key is test_row_0/A:col10/1734020949210/Put/seqid=0 2024-12-12T16:29:09,839 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:09,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021009835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:09,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742296_1472 (size=14341) 2024-12-12T16:29:09,845 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:09,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41942 deadline: 1734021009837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:09,845 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:09,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41966 deadline: 1734021009838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:09,849 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:09,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41956 deadline: 1734021009840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:09,943 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:09,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021009941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:09,948 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:09,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41942 deadline: 1734021009946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:09,948 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:09,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41966 deadline: 1734021009946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:09,955 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:09,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41956 deadline: 1734021009951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:10,105 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/f9f34da35d82493fb664ff0ab6ea4dae as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/f9f34da35d82493fb664ff0ab6ea4dae 2024-12-12T16:29:10,109 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7f9a08e11e132c4f4473bdc5fef699f6/B of 7f9a08e11e132c4f4473bdc5fef699f6 into f9f34da35d82493fb664ff0ab6ea4dae(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:29:10,109 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7f9a08e11e132c4f4473bdc5fef699f6: 2024-12-12T16:29:10,109 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6., storeName=7f9a08e11e132c4f4473bdc5fef699f6/B, priority=13, startTime=1734020949658; duration=0sec 2024-12-12T16:29:10,110 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:10,110 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7f9a08e11e132c4f4473bdc5fef699f6:B 2024-12-12T16:29:10,146 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:10,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021010144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:10,150 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:10,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41966 deadline: 1734021010149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:10,151 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:10,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41942 deadline: 1734021010150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:10,158 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:10,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41956 deadline: 1734021010157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:10,244 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/e7361bd2a95448618843d871345d2518 2024-12-12T16:29:10,252 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/d1827814fdcf4f389f5293c06a1b6a10 is 50, key is test_row_0/B:col10/1734020949210/Put/seqid=0 2024-12-12T16:29:10,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742297_1473 (size=12001) 2024-12-12T16:29:10,258 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/d1827814fdcf4f389f5293c06a1b6a10 2024-12-12T16:29:10,267 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/9a0feaf138724e9bb824767b44faf6bb is 50, key is test_row_0/C:col10/1734020949210/Put/seqid=0 2024-12-12T16:29:10,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742298_1474 (size=12001) 2024-12-12T16:29:10,451 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:10,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021010450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:10,453 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:10,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41966 deadline: 1734021010453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:10,455 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:10,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41942 deadline: 1734021010453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:10,463 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:10,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41956 deadline: 1734021010461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:10,673 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/9a0feaf138724e9bb824767b44faf6bb 2024-12-12T16:29:10,677 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/e7361bd2a95448618843d871345d2518 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/e7361bd2a95448618843d871345d2518 2024-12-12T16:29:10,681 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/e7361bd2a95448618843d871345d2518, entries=200, sequenceid=117, filesize=14.0 K 2024-12-12T16:29:10,682 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/d1827814fdcf4f389f5293c06a1b6a10 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/d1827814fdcf4f389f5293c06a1b6a10 2024-12-12T16:29:10,693 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/d1827814fdcf4f389f5293c06a1b6a10, entries=150, sequenceid=117, filesize=11.7 K 2024-12-12T16:29:10,694 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/9a0feaf138724e9bb824767b44faf6bb as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/9a0feaf138724e9bb824767b44faf6bb 2024-12-12T16:29:10,697 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/9a0feaf138724e9bb824767b44faf6bb, entries=150, sequenceid=117, filesize=11.7 K 2024-12-12T16:29:10,698 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 7f9a08e11e132c4f4473bdc5fef699f6 in 873ms, sequenceid=117, compaction requested=false 2024-12-12T16:29:10,698 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7f9a08e11e132c4f4473bdc5fef699f6: 2024-12-12T16:29:10,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-12T16:29:10,906 INFO [Thread-2023 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 132 completed 2024-12-12T16:29:10,907 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T16:29:10,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees 2024-12-12T16:29:10,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-12T16:29:10,909 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T16:29:10,909 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T16:29:10,910 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=135, ppid=134, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T16:29:10,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 7f9a08e11e132c4f4473bdc5fef699f6 2024-12-12T16:29:10,955 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7f9a08e11e132c4f4473bdc5fef699f6 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T16:29:10,956 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=A 2024-12-12T16:29:10,956 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:10,956 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=B 2024-12-12T16:29:10,956 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:10,956 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=C 2024-12-12T16:29:10,956 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, 
new segment=null 2024-12-12T16:29:10,960 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/6c3218b0d2fc496fb44b5e79f60a55c7 is 50, key is test_row_0/A:col10/1734020949831/Put/seqid=0 2024-12-12T16:29:10,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742299_1475 (size=14391) 2024-12-12T16:29:10,971 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/6c3218b0d2fc496fb44b5e79f60a55c7 2024-12-12T16:29:10,980 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/d2fcbeddd8b84fb088c55f4e921f2f2e is 50, key is test_row_0/B:col10/1734020949831/Put/seqid=0 2024-12-12T16:29:10,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742300_1476 (size=12051) 2024-12-12T16:29:10,988 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/d2fcbeddd8b84fb088c55f4e921f2f2e 2024-12-12T16:29:10,997 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/a38f787c0fab4164a2ad4a025167a947 is 50, key is test_row_0/C:col10/1734020949831/Put/seqid=0 2024-12-12T16:29:11,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742301_1477 (size=12051) 2024-12-12T16:29:11,009 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:11,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021011003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:11,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-12T16:29:11,010 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:11,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41966 deadline: 1734021011004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:11,010 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:11,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41942 deadline: 1734021011005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:11,010 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:11,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41956 deadline: 1734021011006, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:11,061 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:11,062 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-12T16:29:11,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:11,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. as already flushing 2024-12-12T16:29:11,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:11,062 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:29:11,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:11,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:11,075 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:11,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42016 deadline: 1734021011071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:11,075 DEBUG [Thread-2013 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4168 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6., hostname=4f6a4780a2f6,41933,1734020809476, seqNum=2, see https://s.apache.org/timeout, 
exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at 
org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T16:29:11,113 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:11,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021011110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:11,114 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:11,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41966 deadline: 1734021011111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:11,114 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:11,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41942 deadline: 1734021011111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:11,114 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:11,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41956 deadline: 1734021011111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:11,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-12T16:29:11,214 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:11,215 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-12T16:29:11,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:11,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. as already flushing 2024-12-12T16:29:11,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:11,215 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:11,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:11,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:11,317 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:11,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41966 deadline: 1734021011315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:11,317 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:11,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41942 deadline: 1734021011315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:11,318 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:11,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021011316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:11,322 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:11,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41956 deadline: 1734021011316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:11,367 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:11,368 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-12T16:29:11,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:11,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. as already flushing 2024-12-12T16:29:11,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:11,368 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:11,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:11,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:11,403 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/a38f787c0fab4164a2ad4a025167a947 2024-12-12T16:29:11,407 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/6c3218b0d2fc496fb44b5e79f60a55c7 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/6c3218b0d2fc496fb44b5e79f60a55c7 2024-12-12T16:29:11,410 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/6c3218b0d2fc496fb44b5e79f60a55c7, entries=200, sequenceid=129, filesize=14.1 K 2024-12-12T16:29:11,411 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/d2fcbeddd8b84fb088c55f4e921f2f2e as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/d2fcbeddd8b84fb088c55f4e921f2f2e 2024-12-12T16:29:11,414 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/d2fcbeddd8b84fb088c55f4e921f2f2e, entries=150, sequenceid=129, filesize=11.8 K 2024-12-12T16:29:11,415 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/a38f787c0fab4164a2ad4a025167a947 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/a38f787c0fab4164a2ad4a025167a947 2024-12-12T16:29:11,418 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/a38f787c0fab4164a2ad4a025167a947, entries=150, sequenceid=129, filesize=11.8 K 2024-12-12T16:29:11,419 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 7f9a08e11e132c4f4473bdc5fef699f6 in 464ms, sequenceid=129, compaction requested=true 2024-12-12T16:29:11,419 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7f9a08e11e132c4f4473bdc5fef699f6: 2024-12-12T16:29:11,419 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:29:11,420 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7f9a08e11e132c4f4473bdc5fef699f6:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T16:29:11,420 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:11,420 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7f9a08e11e132c4f4473bdc5fef699f6:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T16:29:11,420 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:29:11,420 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7f9a08e11e132c4f4473bdc5fef699f6:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T16:29:11,420 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:29:11,420 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:29:11,421 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40939 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:29:11,421 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): 7f9a08e11e132c4f4473bdc5fef699f6/A is initiating minor compaction (all files) 2024-12-12T16:29:11,421 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7f9a08e11e132c4f4473bdc5fef699f6/A in TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 
2024-12-12T16:29:11,421 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/194ebda3369f48f690b5cbc14111f812, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/e7361bd2a95448618843d871345d2518, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/6c3218b0d2fc496fb44b5e79f60a55c7] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp, totalSize=40.0 K 2024-12-12T16:29:11,422 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:29:11,422 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): 7f9a08e11e132c4f4473bdc5fef699f6/B is initiating minor compaction (all files) 2024-12-12T16:29:11,422 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7f9a08e11e132c4f4473bdc5fef699f6/B in TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:11,422 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/f9f34da35d82493fb664ff0ab6ea4dae, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/d1827814fdcf4f389f5293c06a1b6a10, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/d2fcbeddd8b84fb088c55f4e921f2f2e] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp, totalSize=35.4 K 2024-12-12T16:29:11,422 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 194ebda3369f48f690b5cbc14111f812, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1734020948039 2024-12-12T16:29:11,423 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting e7361bd2a95448618843d871345d2518, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1734020949203 2024-12-12T16:29:11,423 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting f9f34da35d82493fb664ff0ab6ea4dae, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1734020948039 2024-12-12T16:29:11,423 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 6c3218b0d2fc496fb44b5e79f60a55c7, keycount=200, bloomtype=ROW, size=14.1 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1734020949831 2024-12-12T16:29:11,423 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting d1827814fdcf4f389f5293c06a1b6a10, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1734020949210 2024-12-12T16:29:11,424 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting d2fcbeddd8b84fb088c55f4e921f2f2e, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1734020949831 2024-12-12T16:29:11,433 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7f9a08e11e132c4f4473bdc5fef699f6#A#compaction#402 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:29:11,433 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/9c6c013b733a46be8d040191827ad816 is 50, key is test_row_0/A:col10/1734020949831/Put/seqid=0 2024-12-12T16:29:11,434 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7f9a08e11e132c4f4473bdc5fef699f6#B#compaction#403 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:29:11,435 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/0d9c00e20d244e28a269c32312d05a65 is 50, key is test_row_0/B:col10/1734020949831/Put/seqid=0 2024-12-12T16:29:11,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742302_1478 (size=12359) 2024-12-12T16:29:11,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742303_1479 (size=12359) 2024-12-12T16:29:11,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-12T16:29:11,520 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:11,520 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-12T16:29:11,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 
2024-12-12T16:29:11,521 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2837): Flushing 7f9a08e11e132c4f4473bdc5fef699f6 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-12T16:29:11,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=A 2024-12-12T16:29:11,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:11,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=B 2024-12-12T16:29:11,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:11,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=C 2024-12-12T16:29:11,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:11,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/dc8faf73f74847c691c6f687475d4d0a is 50, key is test_row_0/A:col10/1734020951004/Put/seqid=0 2024-12-12T16:29:11,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742304_1480 (size=12151) 2024-12-12T16:29:11,534 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/dc8faf73f74847c691c6f687475d4d0a 2024-12-12T16:29:11,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/e4beb31865c149559d9d60230d95103f is 50, key is test_row_0/B:col10/1734020951004/Put/seqid=0 2024-12-12T16:29:11,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742305_1481 (size=12151) 2024-12-12T16:29:11,552 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=154 (bloomFilter=true), 
to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/e4beb31865c149559d9d60230d95103f 2024-12-12T16:29:11,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/89398b25d08f437eb622252245dad65b is 50, key is test_row_0/C:col10/1734020951004/Put/seqid=0 2024-12-12T16:29:11,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742306_1482 (size=12151) 2024-12-12T16:29:11,564 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/89398b25d08f437eb622252245dad65b 2024-12-12T16:29:11,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/dc8faf73f74847c691c6f687475d4d0a as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/dc8faf73f74847c691c6f687475d4d0a 2024-12-12T16:29:11,573 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/dc8faf73f74847c691c6f687475d4d0a, entries=150, sequenceid=154, filesize=11.9 K 2024-12-12T16:29:11,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/e4beb31865c149559d9d60230d95103f as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/e4beb31865c149559d9d60230d95103f 2024-12-12T16:29:11,576 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/e4beb31865c149559d9d60230d95103f, entries=150, sequenceid=154, filesize=11.9 K 2024-12-12T16:29:11,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/89398b25d08f437eb622252245dad65b as 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/89398b25d08f437eb622252245dad65b 2024-12-12T16:29:11,581 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/89398b25d08f437eb622252245dad65b, entries=150, sequenceid=154, filesize=11.9 K 2024-12-12T16:29:11,581 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=0 B/0 for 7f9a08e11e132c4f4473bdc5fef699f6 in 60ms, sequenceid=154, compaction requested=true 2024-12-12T16:29:11,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2538): Flush status journal for 7f9a08e11e132c4f4473bdc5fef699f6: 2024-12-12T16:29:11,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:11,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=135 2024-12-12T16:29:11,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4106): Remote procedure done, pid=135 2024-12-12T16:29:11,584 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=135, resume processing ppid=134 2024-12-12T16:29:11,584 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, ppid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 673 msec 2024-12-12T16:29:11,586 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees in 678 msec 2024-12-12T16:29:11,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 7f9a08e11e132c4f4473bdc5fef699f6 2024-12-12T16:29:11,632 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7f9a08e11e132c4f4473bdc5fef699f6 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T16:29:11,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=A 2024-12-12T16:29:11,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:11,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=B 2024-12-12T16:29:11,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:11,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=C 2024-12-12T16:29:11,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:11,637 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/32ce91b4a58d46199e8c2a1da1f95c8d is 50, key is test_row_0/A:col10/1734020951622/Put/seqid=0 2024-12-12T16:29:11,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742307_1483 (size=14541) 2024-12-12T16:29:11,642 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=165 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/32ce91b4a58d46199e8c2a1da1f95c8d 2024-12-12T16:29:11,647 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/afe4fac00ee243cfa355eb2ac234b25e is 50, key is test_row_0/B:col10/1734020951622/Put/seqid=0 2024-12-12T16:29:11,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742308_1484 (size=12151) 2024-12-12T16:29:11,676 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:11,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41942 deadline: 1734021011668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:11,676 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:11,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021011669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:11,677 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:11,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41966 deadline: 1734021011670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:11,678 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:11,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41956 deadline: 1734021011671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:11,781 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:11,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41942 deadline: 1734021011777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:11,782 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:11,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021011777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:11,782 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:11,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41966 deadline: 1734021011777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:11,784 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:11,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41956 deadline: 1734021011779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:11,853 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/9c6c013b733a46be8d040191827ad816 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/9c6c013b733a46be8d040191827ad816 2024-12-12T16:29:11,857 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7f9a08e11e132c4f4473bdc5fef699f6/A of 7f9a08e11e132c4f4473bdc5fef699f6 into 9c6c013b733a46be8d040191827ad816(size=12.1 K), total size for store is 23.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T16:29:11,857 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7f9a08e11e132c4f4473bdc5fef699f6: 2024-12-12T16:29:11,857 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6., storeName=7f9a08e11e132c4f4473bdc5fef699f6/A, priority=13, startTime=1734020951419; duration=0sec 2024-12-12T16:29:11,858 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:29:11,858 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7f9a08e11e132c4f4473bdc5fef699f6:A 2024-12-12T16:29:11,858 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T16:29:11,859 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48410 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T16:29:11,859 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): 7f9a08e11e132c4f4473bdc5fef699f6/C is initiating minor compaction (all files) 2024-12-12T16:29:11,859 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7f9a08e11e132c4f4473bdc5fef699f6/C in TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:11,859 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/fd651e3b5e8e4452b62d0cdf21ffd2cd, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/9a0feaf138724e9bb824767b44faf6bb, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/a38f787c0fab4164a2ad4a025167a947, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/89398b25d08f437eb622252245dad65b] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp, totalSize=47.3 K 2024-12-12T16:29:11,859 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting fd651e3b5e8e4452b62d0cdf21ffd2cd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1734020948039 2024-12-12T16:29:11,860 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 9a0feaf138724e9bb824767b44faf6bb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1734020949210 2024-12-12T16:29:11,860 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting a38f787c0fab4164a2ad4a025167a947, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, 
compression=NONE, seqNum=129, earliestPutTs=1734020949831 2024-12-12T16:29:11,860 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 89398b25d08f437eb622252245dad65b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1734020951002 2024-12-12T16:29:11,866 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/0d9c00e20d244e28a269c32312d05a65 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/0d9c00e20d244e28a269c32312d05a65 2024-12-12T16:29:11,869 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7f9a08e11e132c4f4473bdc5fef699f6#C#compaction#409 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:29:11,869 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/2e89038b79e249dea46a2b8f373d3ec3 is 50, key is test_row_0/C:col10/1734020951004/Put/seqid=0 2024-12-12T16:29:11,870 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7f9a08e11e132c4f4473bdc5fef699f6/B of 7f9a08e11e132c4f4473bdc5fef699f6 into 0d9c00e20d244e28a269c32312d05a65(size=12.1 K), total size for store is 23.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T16:29:11,871 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7f9a08e11e132c4f4473bdc5fef699f6: 2024-12-12T16:29:11,871 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6., storeName=7f9a08e11e132c4f4473bdc5fef699f6/B, priority=13, startTime=1734020951420; duration=0sec 2024-12-12T16:29:11,871 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:11,871 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7f9a08e11e132c4f4473bdc5fef699f6:B 2024-12-12T16:29:11,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742309_1485 (size=12493) 2024-12-12T16:29:11,877 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/2e89038b79e249dea46a2b8f373d3ec3 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/2e89038b79e249dea46a2b8f373d3ec3 2024-12-12T16:29:11,881 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7f9a08e11e132c4f4473bdc5fef699f6/C of 7f9a08e11e132c4f4473bdc5fef699f6 into 2e89038b79e249dea46a2b8f373d3ec3(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:29:11,881 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7f9a08e11e132c4f4473bdc5fef699f6: 2024-12-12T16:29:11,881 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6., storeName=7f9a08e11e132c4f4473bdc5fef699f6/C, priority=12, startTime=1734020951420; duration=0sec 2024-12-12T16:29:11,881 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:11,881 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7f9a08e11e132c4f4473bdc5fef699f6:C 2024-12-12T16:29:11,987 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:11,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021011984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:11,987 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:11,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41942 deadline: 1734021011984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:11,988 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:11,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41966 deadline: 1734021011984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:11,988 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:11,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41956 deadline: 1734021011985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:12,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-12T16:29:12,012 INFO [Thread-2023 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 134 completed 2024-12-12T16:29:12,013 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T16:29:12,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees 2024-12-12T16:29:12,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-12T16:29:12,015 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T16:29:12,015 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T16:29:12,016 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T16:29:12,051 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=165 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/afe4fac00ee243cfa355eb2ac234b25e 2024-12-12T16:29:12,058 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/0c3c22f5d8a04837856abd6986b9bac5 is 50, key is test_row_0/C:col10/1734020951622/Put/seqid=0 2024-12-12T16:29:12,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742310_1486 (size=12151) 
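
Note: the repeated RegionTooBusyException entries above show writers being rejected once the region's memstore passes the blocking limit reported in the messages (512.0 K for this test region). The HBase client retries such failures internally; the Java sketch below is a minimal, illustrative example of a client write against this table, not code from the test itself. The table name, row key, column family, and qualifier mirror what appears in the log; the retry count, pause, and written value are assumptions chosen only for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MemstorePressureWriteExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Illustrative (not the test's) client settings: how many times a failed
    // operation is retried and the base pause between retries.
    conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 15);
    conf.setLong(HConstants.HBASE_CLIENT_PAUSE, 100L); // milliseconds

    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row and column names mirror the keys visible in the log
      // (test_row_0, column family A, qualifier col10).
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // If the region is over its memstore limit, the server throws
      // RegionTooBusyException (as in the log above) and the client backs off
      // and retries internally until its retry budget or timeout is exhausted.
      table.put(put);
    }
  }
}
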
2024-12-12T16:29:12,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-12T16:29:12,167 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:12,168 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-12T16:29:12,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:12,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. as already flushing 2024-12-12T16:29:12,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:12,168 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:12,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:12,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:12,293 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:12,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021012288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:12,294 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:12,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41966 deadline: 1734021012290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:12,294 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:12,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41956 deadline: 1734021012290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:12,294 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:12,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41942 deadline: 1734021012290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:12,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-12T16:29:12,320 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:12,321 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-12T16:29:12,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:12,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. as already flushing 2024-12-12T16:29:12,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:12,321 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:29:12,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:12,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:12,474 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:12,475 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-12T16:29:12,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:12,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. as already flushing 2024-12-12T16:29:12,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:12,475 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:12,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:12,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
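
Note: the pid=136/137 entries above trace a master-driven table flush: the client's flush request (see the HBaseAdmin$TableFuture entry for procId 134) is stored as a FlushTableProcedure, the master dispatches a FlushRegionProcedure to the region server, and the server keeps reporting "Unable to complete flush ... as already flushing" until the in-progress flush finishes and the procedure is redispatched. The Java sketch below is a minimal, hedged example of issuing such a flush through the Admin API; the connection setup and class name are illustrative and not taken from the test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Asks the master to flush every region of the table; in the log above this
      // shows up as a FlushTableProcedure with per-region FlushRegionProcedure
      // subprocedures. The client-side future waits for the procedure to complete.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
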
2024-12-12T16:29:12,477 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=165 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/0c3c22f5d8a04837856abd6986b9bac5 2024-12-12T16:29:12,482 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/32ce91b4a58d46199e8c2a1da1f95c8d as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/32ce91b4a58d46199e8c2a1da1f95c8d 2024-12-12T16:29:12,485 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/32ce91b4a58d46199e8c2a1da1f95c8d, entries=200, sequenceid=165, filesize=14.2 K 2024-12-12T16:29:12,487 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/afe4fac00ee243cfa355eb2ac234b25e as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/afe4fac00ee243cfa355eb2ac234b25e 2024-12-12T16:29:12,489 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/afe4fac00ee243cfa355eb2ac234b25e, entries=150, sequenceid=165, filesize=11.9 K 2024-12-12T16:29:12,490 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/0c3c22f5d8a04837856abd6986b9bac5 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/0c3c22f5d8a04837856abd6986b9bac5 2024-12-12T16:29:12,493 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/0c3c22f5d8a04837856abd6986b9bac5, entries=150, sequenceid=165, filesize=11.9 K 2024-12-12T16:29:12,494 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 7f9a08e11e132c4f4473bdc5fef699f6 in 863ms, sequenceid=165, compaction requested=true 2024-12-12T16:29:12,494 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7f9a08e11e132c4f4473bdc5fef699f6: 2024-12-12T16:29:12,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7f9a08e11e132c4f4473bdc5fef699f6:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T16:29:12,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:12,494 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:29:12,494 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:29:12,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7f9a08e11e132c4f4473bdc5fef699f6:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T16:29:12,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:12,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7f9a08e11e132c4f4473bdc5fef699f6:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T16:29:12,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:29:12,495 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36661 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:29:12,495 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): 7f9a08e11e132c4f4473bdc5fef699f6/B is initiating minor compaction (all files) 2024-12-12T16:29:12,495 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7f9a08e11e132c4f4473bdc5fef699f6/B in TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:12,495 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/0d9c00e20d244e28a269c32312d05a65, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/e4beb31865c149559d9d60230d95103f, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/afe4fac00ee243cfa355eb2ac234b25e] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp, totalSize=35.8 K 2024-12-12T16:29:12,495 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39051 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:29:12,495 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): 7f9a08e11e132c4f4473bdc5fef699f6/A is initiating minor compaction (all files) 2024-12-12T16:29:12,495 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7f9a08e11e132c4f4473bdc5fef699f6/A in TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 
2024-12-12T16:29:12,496 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/9c6c013b733a46be8d040191827ad816, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/dc8faf73f74847c691c6f687475d4d0a, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/32ce91b4a58d46199e8c2a1da1f95c8d] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp, totalSize=38.1 K 2024-12-12T16:29:12,496 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9c6c013b733a46be8d040191827ad816, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1734020949831 2024-12-12T16:29:12,496 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 0d9c00e20d244e28a269c32312d05a65, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1734020949831 2024-12-12T16:29:12,496 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting e4beb31865c149559d9d60230d95103f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1734020951002 2024-12-12T16:29:12,496 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting dc8faf73f74847c691c6f687475d4d0a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1734020951002 2024-12-12T16:29:12,497 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting afe4fac00ee243cfa355eb2ac234b25e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=165, earliestPutTs=1734020951622 2024-12-12T16:29:12,497 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 32ce91b4a58d46199e8c2a1da1f95c8d, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=165, earliestPutTs=1734020951622 2024-12-12T16:29:12,504 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7f9a08e11e132c4f4473bdc5fef699f6#A#compaction#411 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:29:12,505 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/dd4a29ab7a6e4688b2420764206a59b0 is 50, key is test_row_0/A:col10/1734020951622/Put/seqid=0 2024-12-12T16:29:12,505 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7f9a08e11e132c4f4473bdc5fef699f6#B#compaction#412 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:29:12,506 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/21131d683f534e50b675e55c117d8882 is 50, key is test_row_0/B:col10/1734020951622/Put/seqid=0 2024-12-12T16:29:12,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742311_1487 (size=12561) 2024-12-12T16:29:12,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742312_1488 (size=12561) 2024-12-12T16:29:12,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-12T16:29:12,627 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:12,627 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-12T16:29:12,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:12,628 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2837): Flushing 7f9a08e11e132c4f4473bdc5fef699f6 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-12T16:29:12,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=A 2024-12-12T16:29:12,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:12,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=B 2024-12-12T16:29:12,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:12,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=C 2024-12-12T16:29:12,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:12,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/7630274673bc42dba091ba25a08cd711 is 50, key is 
test_row_0/A:col10/1734020951661/Put/seqid=0 2024-12-12T16:29:12,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742313_1489 (size=12151) 2024-12-12T16:29:12,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 7f9a08e11e132c4f4473bdc5fef699f6 2024-12-12T16:29:12,799 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. as already flushing 2024-12-12T16:29:12,811 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:12,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41942 deadline: 1734021012805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:12,815 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:12,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021012810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:12,819 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:12,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41956 deadline: 1734021012811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:12,819 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:12,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41966 deadline: 1734021012811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:12,914 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/21131d683f534e50b675e55c117d8882 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/21131d683f534e50b675e55c117d8882 2024-12-12T16:29:12,914 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/dd4a29ab7a6e4688b2420764206a59b0 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/dd4a29ab7a6e4688b2420764206a59b0 2024-12-12T16:29:12,917 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:12,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41942 deadline: 1734021012912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:12,919 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7f9a08e11e132c4f4473bdc5fef699f6/A of 7f9a08e11e132c4f4473bdc5fef699f6 into dd4a29ab7a6e4688b2420764206a59b0(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:29:12,919 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7f9a08e11e132c4f4473bdc5fef699f6/B of 7f9a08e11e132c4f4473bdc5fef699f6 into 21131d683f534e50b675e55c117d8882(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:29:12,919 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7f9a08e11e132c4f4473bdc5fef699f6: 2024-12-12T16:29:12,919 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7f9a08e11e132c4f4473bdc5fef699f6: 2024-12-12T16:29:12,919 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6., storeName=7f9a08e11e132c4f4473bdc5fef699f6/B, priority=13, startTime=1734020952494; duration=0sec 2024-12-12T16:29:12,919 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6., storeName=7f9a08e11e132c4f4473bdc5fef699f6/A, priority=13, startTime=1734020952494; duration=0sec 2024-12-12T16:29:12,920 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:29:12,920 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7f9a08e11e132c4f4473bdc5fef699f6:A 2024-12-12T16:29:12,920 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:12,920 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-12T16:29:12,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021012916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:12,920 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:12,920 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7f9a08e11e132c4f4473bdc5fef699f6:B 2024-12-12T16:29:12,921 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-12T16:29:12,921 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-12T16:29:12,921 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. because compaction request was cancelled 2024-12-12T16:29:12,921 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7f9a08e11e132c4f4473bdc5fef699f6:C 2024-12-12T16:29:12,923 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:12,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41956 deadline: 1734021012920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:12,925 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:12,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41966 deadline: 1734021012921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:13,037 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=193 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/7630274673bc42dba091ba25a08cd711 2024-12-12T16:29:13,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/8ecff760ab13415bb877bbeafac3d348 is 50, key is test_row_0/B:col10/1734020951661/Put/seqid=0 2024-12-12T16:29:13,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742314_1490 (size=12151) 2024-12-12T16:29:13,122 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:13,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41942 deadline: 1734021013118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:13,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-12T16:29:13,126 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:13,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021013121, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:13,128 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:13,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41956 deadline: 1734021013125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:13,129 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:13,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41966 deadline: 1734021013126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:13,428 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:13,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41942 deadline: 1734021013426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:13,435 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:13,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021013429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:13,435 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:13,435 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:13,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41966 deadline: 1734021013430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:13,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41956 deadline: 1734021013429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:13,449 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=193 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/8ecff760ab13415bb877bbeafac3d348 2024-12-12T16:29:13,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/f0df70f440dc45a292651a3cf1b1ef8b is 50, key is test_row_0/C:col10/1734020951661/Put/seqid=0 2024-12-12T16:29:13,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742315_1491 (size=12151) 2024-12-12T16:29:13,861 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=193 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/f0df70f440dc45a292651a3cf1b1ef8b 2024-12-12T16:29:13,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/7630274673bc42dba091ba25a08cd711 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/7630274673bc42dba091ba25a08cd711 2024-12-12T16:29:13,868 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/7630274673bc42dba091ba25a08cd711, entries=150, sequenceid=193, filesize=11.9 K 2024-12-12T16:29:13,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/8ecff760ab13415bb877bbeafac3d348 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/8ecff760ab13415bb877bbeafac3d348 2024-12-12T16:29:13,872 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/8ecff760ab13415bb877bbeafac3d348, entries=150, sequenceid=193, filesize=11.9 K 2024-12-12T16:29:13,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/f0df70f440dc45a292651a3cf1b1ef8b as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/f0df70f440dc45a292651a3cf1b1ef8b 2024-12-12T16:29:13,876 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/f0df70f440dc45a292651a3cf1b1ef8b, entries=150, sequenceid=193, filesize=11.9 K 2024-12-12T16:29:13,877 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 7f9a08e11e132c4f4473bdc5fef699f6 in 1249ms, sequenceid=193, compaction requested=true 2024-12-12T16:29:13,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2538): Flush status journal for 7f9a08e11e132c4f4473bdc5fef699f6: 2024-12-12T16:29:13,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 
2024-12-12T16:29:13,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=137 2024-12-12T16:29:13,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4106): Remote procedure done, pid=137 2024-12-12T16:29:13,881 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=137, resume processing ppid=136 2024-12-12T16:29:13,881 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8630 sec 2024-12-12T16:29:13,883 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees in 1.8690 sec 2024-12-12T16:29:13,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 7f9a08e11e132c4f4473bdc5fef699f6 2024-12-12T16:29:13,935 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7f9a08e11e132c4f4473bdc5fef699f6 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-12T16:29:13,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=A 2024-12-12T16:29:13,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:13,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=B 2024-12-12T16:29:13,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:13,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=C 2024-12-12T16:29:13,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:13,939 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/ea2f3099aedd4a2ca67429b66feec58c is 50, key is test_row_0/A:col10/1734020952808/Put/seqid=0 2024-12-12T16:29:13,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742316_1492 (size=14541) 2024-12-12T16:29:14,013 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:14,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41956 deadline: 1734021014003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:14,013 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:14,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021014004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:14,019 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:14,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41942 deadline: 1734021014010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:14,019 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:14,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41966 deadline: 1734021014011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:14,116 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:14,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021014115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:14,117 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:14,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41956 deadline: 1734021014115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:14,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-12T16:29:14,124 INFO [Thread-2023 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 136 completed 2024-12-12T16:29:14,125 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:14,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41942 deadline: 1734021014120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:14,125 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:14,125 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T16:29:14,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41966 deadline: 1734021014120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:14,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees 2024-12-12T16:29:14,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-12T16:29:14,127 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T16:29:14,127 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T16:29:14,128 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=138, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T16:29:14,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-12T16:29:14,279 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:14,279 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote 
procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-12T16:29:14,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:14,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. as already flushing 2024-12-12T16:29:14,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:14,280 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:14,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:29:14,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:14,323 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:14,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41956 deadline: 1734021014318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:14,323 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:14,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021014318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:14,333 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:14,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41942 deadline: 1734021014326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:14,333 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:14,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41966 deadline: 1734021014327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:14,344 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/ea2f3099aedd4a2ca67429b66feec58c 2024-12-12T16:29:14,351 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/7aa29c93eb744e6488d3d79a1942878e is 50, key is test_row_0/B:col10/1734020952808/Put/seqid=0 2024-12-12T16:29:14,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742317_1493 (size=12151) 2024-12-12T16:29:14,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-12T16:29:14,432 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:14,432 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-12T16:29:14,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:14,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. as already flushing 2024-12-12T16:29:14,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 
2024-12-12T16:29:14,433 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:14,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:14,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:14,585 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:14,585 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-12T16:29:14,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:14,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. as already flushing 2024-12-12T16:29:14,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:14,586 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:14,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:14,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:14,627 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:14,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021014624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:14,628 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:14,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41956 deadline: 1734021014626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:14,638 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:14,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41942 deadline: 1734021014635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:14,639 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:14,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41966 deadline: 1734021014636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:14,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-12T16:29:14,738 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:14,738 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-12T16:29:14,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:14,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. as already flushing 2024-12-12T16:29:14,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:14,738 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:29:14,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:14,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:14,755 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/7aa29c93eb744e6488d3d79a1942878e 2024-12-12T16:29:14,762 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/03a9a597c35b4a179b2a19b37ce749c8 is 50, key is test_row_0/C:col10/1734020952808/Put/seqid=0 2024-12-12T16:29:14,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742318_1494 (size=12151) 2024-12-12T16:29:14,767 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/03a9a597c35b4a179b2a19b37ce749c8 2024-12-12T16:29:14,771 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/ea2f3099aedd4a2ca67429b66feec58c as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/ea2f3099aedd4a2ca67429b66feec58c 2024-12-12T16:29:14,774 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/ea2f3099aedd4a2ca67429b66feec58c, entries=200, sequenceid=207, filesize=14.2 K 2024-12-12T16:29:14,775 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/7aa29c93eb744e6488d3d79a1942878e as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/7aa29c93eb744e6488d3d79a1942878e 2024-12-12T16:29:14,779 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/7aa29c93eb744e6488d3d79a1942878e, entries=150, sequenceid=207, filesize=11.9 K 2024-12-12T16:29:14,780 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/03a9a597c35b4a179b2a19b37ce749c8 as 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/03a9a597c35b4a179b2a19b37ce749c8 2024-12-12T16:29:14,783 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/03a9a597c35b4a179b2a19b37ce749c8, entries=150, sequenceid=207, filesize=11.9 K 2024-12-12T16:29:14,784 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 7f9a08e11e132c4f4473bdc5fef699f6 in 850ms, sequenceid=207, compaction requested=true 2024-12-12T16:29:14,784 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7f9a08e11e132c4f4473bdc5fef699f6: 2024-12-12T16:29:14,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7f9a08e11e132c4f4473bdc5fef699f6:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T16:29:14,784 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:29:14,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:14,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7f9a08e11e132c4f4473bdc5fef699f6:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T16:29:14,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:14,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7f9a08e11e132c4f4473bdc5fef699f6:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T16:29:14,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:29:14,784 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:29:14,785 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39253 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:29:14,785 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:29:14,785 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): 7f9a08e11e132c4f4473bdc5fef699f6/A is initiating minor compaction (all files) 2024-12-12T16:29:14,785 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): 7f9a08e11e132c4f4473bdc5fef699f6/B is initiating minor compaction (all files) 2024-12-12T16:29:14,785 INFO 
[RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7f9a08e11e132c4f4473bdc5fef699f6/A in TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:14,785 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7f9a08e11e132c4f4473bdc5fef699f6/B in TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:14,785 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/dd4a29ab7a6e4688b2420764206a59b0, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/7630274673bc42dba091ba25a08cd711, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/ea2f3099aedd4a2ca67429b66feec58c] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp, totalSize=38.3 K 2024-12-12T16:29:14,785 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/21131d683f534e50b675e55c117d8882, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/8ecff760ab13415bb877bbeafac3d348, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/7aa29c93eb744e6488d3d79a1942878e] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp, totalSize=36.0 K 2024-12-12T16:29:14,785 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 21131d683f534e50b675e55c117d8882, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=165, earliestPutTs=1734020951622 2024-12-12T16:29:14,785 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting dd4a29ab7a6e4688b2420764206a59b0, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=165, earliestPutTs=1734020951622 2024-12-12T16:29:14,786 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 8ecff760ab13415bb877bbeafac3d348, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1734020951661 2024-12-12T16:29:14,786 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7630274673bc42dba091ba25a08cd711, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1734020951661 2024-12-12T16:29:14,786 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 7aa29c93eb744e6488d3d79a1942878e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1734020952808 2024-12-12T16:29:14,786 DEBUG 
[RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting ea2f3099aedd4a2ca67429b66feec58c, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1734020952808 2024-12-12T16:29:14,800 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7f9a08e11e132c4f4473bdc5fef699f6#B#compaction#420 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:29:14,800 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7f9a08e11e132c4f4473bdc5fef699f6#A#compaction#419 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:29:14,800 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/4234b091fc9c460598a64a3c9212b8fd is 50, key is test_row_0/B:col10/1734020952808/Put/seqid=0 2024-12-12T16:29:14,800 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/bef2065021204e0e9a1ba762d7d1b180 is 50, key is test_row_0/A:col10/1734020952808/Put/seqid=0 2024-12-12T16:29:14,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742320_1496 (size=12663) 2024-12-12T16:29:14,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742319_1495 (size=12663) 2024-12-12T16:29:14,815 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/4234b091fc9c460598a64a3c9212b8fd as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/4234b091fc9c460598a64a3c9212b8fd 2024-12-12T16:29:14,819 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7f9a08e11e132c4f4473bdc5fef699f6/B of 7f9a08e11e132c4f4473bdc5fef699f6 into 4234b091fc9c460598a64a3c9212b8fd(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
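The throttle.PressureAwareThroughputController entries above ("average throughput is 3.28 MB/second ... total limit is 50.00 MB/second") come from HBase's compaction throughput limiter. As a hedged illustration of where that ceiling is configured, the sketch below sets the commonly documented HBase 2.x throughput bounds on a Configuration object; the exact key names, the bound values, and the NoLimitThroughputController class name are assumptions to verify against the version actually running, not values read out of this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThrottleConfigSketch {
        public static void main(String[] args) {
            // Illustrative only: key names below are the commonly documented HBase 2.x
            // settings for PressureAwareCompactionThroughputController.
            Configuration conf = HBaseConfiguration.create();

            // Bounds (bytes/second) between which the controller scales the allowed
            // compaction write rate depending on flush pressure.
            conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
            conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);

            // To disable throttling entirely one would swap in the no-limit controller
            // (assumed class name, left commented out here):
            // conf.set("hbase.regionserver.throughput.controller",
            //          "org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController");

            System.out.println("compaction throughput ceiling = "
                    + conf.getLong("hbase.hstore.compaction.throughput.higher.bound", -1) + " bytes/s");
        }
    }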
2024-12-12T16:29:14,819 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7f9a08e11e132c4f4473bdc5fef699f6: 2024-12-12T16:29:14,819 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6., storeName=7f9a08e11e132c4f4473bdc5fef699f6/B, priority=13, startTime=1734020954784; duration=0sec 2024-12-12T16:29:14,819 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:29:14,819 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7f9a08e11e132c4f4473bdc5fef699f6:B 2024-12-12T16:29:14,819 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T16:29:14,820 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48946 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T16:29:14,820 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): 7f9a08e11e132c4f4473bdc5fef699f6/C is initiating minor compaction (all files) 2024-12-12T16:29:14,820 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7f9a08e11e132c4f4473bdc5fef699f6/C in TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:14,820 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/2e89038b79e249dea46a2b8f373d3ec3, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/0c3c22f5d8a04837856abd6986b9bac5, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/f0df70f440dc45a292651a3cf1b1ef8b, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/03a9a597c35b4a179b2a19b37ce749c8] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp, totalSize=47.8 K 2024-12-12T16:29:14,821 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 2e89038b79e249dea46a2b8f373d3ec3, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1734020951002 2024-12-12T16:29:14,821 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 0c3c22f5d8a04837856abd6986b9bac5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=165, earliestPutTs=1734020951622 2024-12-12T16:29:14,821 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting f0df70f440dc45a292651a3cf1b1ef8b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=193, earliestPutTs=1734020951661 2024-12-12T16:29:14,822 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 03a9a597c35b4a179b2a19b37ce749c8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1734020952808 2024-12-12T16:29:14,827 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7f9a08e11e132c4f4473bdc5fef699f6#C#compaction#421 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:29:14,828 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/8356c5ba0fa5497fb07b710d4de935a5 is 50, key is test_row_0/C:col10/1734020952808/Put/seqid=0 2024-12-12T16:29:14,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742321_1497 (size=12629) 2024-12-12T16:29:14,835 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/8356c5ba0fa5497fb07b710d4de935a5 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/8356c5ba0fa5497fb07b710d4de935a5 2024-12-12T16:29:14,838 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7f9a08e11e132c4f4473bdc5fef699f6/C of 7f9a08e11e132c4f4473bdc5fef699f6 into 8356c5ba0fa5497fb07b710d4de935a5(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
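The "Exploring compaction algorithm has selected N files ... after considering M permutations with K in ratio" lines above describe how store files are picked for a minor compaction. The following is a deliberately simplified stand-in for that idea, not the HBase ExploringCompactionPolicy source: it scans contiguous windows of store-file sizes, keeps windows whose files satisfy a size ratio, and prefers the window that compacts the most files for the least rewrite. The sizes, ratio, and min/max file counts are illustrative; only the 48,946-byte total echoes the log entry above.

    import java.util.ArrayList;
    import java.util.List;

    public class ExploringSelectionSketch {

        // Pick the contiguous window of file sizes that is "in ratio" (no file larger
        // than ratio x the rest of the window) and compacts the most files cheapest.
        static List<Long> select(List<Long> sizes, int minFiles, int maxFiles, double ratio) {
            List<Long> best = new ArrayList<>();
            long bestTotal = Long.MAX_VALUE;
            for (int start = 0; start < sizes.size(); start++) {
                for (int end = start + minFiles; end <= Math.min(sizes.size(), start + maxFiles); end++) {
                    List<Long> window = sizes.subList(start, end);
                    long total = window.stream().mapToLong(Long::longValue).sum();
                    boolean inRatio = window.stream().allMatch(s -> s <= ratio * (total - s));
                    if (!inRatio) continue;
                    // Prefer more files; break ties by smaller total rewrite size.
                    if (window.size() > best.size()
                            || (window.size() == best.size() && total < bestTotal)) {
                        best = new ArrayList<>(window);
                        bestTotal = total;
                    }
                }
            }
            return best;
        }

        public static void main(String[] args) {
            // Roughly the four ~12 KB store files of the C family above (sum = 48,946).
            List<Long> sizes = List.of(12_491L, 12_154L, 12_154L, 12_147L);
            System.out.println("selected for compaction: " + select(sizes, 3, 10, 1.2));
        }
    }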
2024-12-12T16:29:14,838 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7f9a08e11e132c4f4473bdc5fef699f6: 2024-12-12T16:29:14,838 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6., storeName=7f9a08e11e132c4f4473bdc5fef699f6/C, priority=12, startTime=1734020954784; duration=0sec 2024-12-12T16:29:14,838 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:14,838 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7f9a08e11e132c4f4473bdc5fef699f6:C 2024-12-12T16:29:14,890 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:14,891 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-12T16:29:14,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:14,891 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2837): Flushing 7f9a08e11e132c4f4473bdc5fef699f6 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-12T16:29:14,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=A 2024-12-12T16:29:14,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:14,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=B 2024-12-12T16:29:14,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:14,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=C 2024-12-12T16:29:14,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:14,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/0de5a350decc4b2e8b0ad1c1d8732c95 is 50, key is test_row_0/A:col10/1734020954009/Put/seqid=0 2024-12-12T16:29:14,907 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742322_1498 (size=12151) 2024-12-12T16:29:14,910 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/0de5a350decc4b2e8b0ad1c1d8732c95 2024-12-12T16:29:14,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/65c3711cb5e045a987dc6fdd941f9def is 50, key is test_row_0/B:col10/1734020954009/Put/seqid=0 2024-12-12T16:29:14,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742323_1499 (size=12151) 2024-12-12T16:29:14,928 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/65c3711cb5e045a987dc6fdd941f9def 2024-12-12T16:29:14,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/fe1c48837006493e9d3878bbfdf538ce is 50, key is test_row_0/C:col10/1734020954009/Put/seqid=0 2024-12-12T16:29:14,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742324_1500 (size=12151) 2024-12-12T16:29:15,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 7f9a08e11e132c4f4473bdc5fef699f6 2024-12-12T16:29:15,101 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. as already flushing 2024-12-12T16:29:15,134 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:15,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41956 deadline: 1734021015129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:15,138 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:15,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42016 deadline: 1734021015133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:15,139 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:15,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021015134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:15,143 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:15,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41966 deadline: 1734021015140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:15,147 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:15,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41942 deadline: 1734021015143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:15,215 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/bef2065021204e0e9a1ba762d7d1b180 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/bef2065021204e0e9a1ba762d7d1b180 2024-12-12T16:29:15,219 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7f9a08e11e132c4f4473bdc5fef699f6/A of 7f9a08e11e132c4f4473bdc5fef699f6 into bef2065021204e0e9a1ba762d7d1b180(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
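The repeated RegionTooBusyException warnings in this stretch are the region server pushing back on writers while the region's memstore is over its blocking limit and flushes are still catching up. The stock HBase client normally retries this exception internally; the sketch below is a minimal manual bounded retry with exponential backoff around Table.put, assuming the exception surfaces to the caller in the deployment at hand. The table name and row mirror the test above, but the column family, value, and retry parameters are illustrative.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetrySketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                putWithBackoff(table, put, 5, 200L);
            }
        }

        // Retry a put a bounded number of times, backing off while the region
        // rejects writes because its memstore exceeds the blocking limit.
        static void putWithBackoff(Table table, Put put, int maxAttempts, long initialPauseMs)
                throws IOException, InterruptedException {
            long pause = initialPauseMs;
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);
                    return;
                } catch (RegionTooBusyException e) {
                    if (attempt >= maxAttempts) throw e;
                    Thread.sleep(pause);                   // let MemStoreFlusher drain the memstore
                    pause = Math.min(pause * 2, 10_000L);  // exponential backoff, capped at 10 s
                }
            }
        }
    }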
2024-12-12T16:29:15,219 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7f9a08e11e132c4f4473bdc5fef699f6: 2024-12-12T16:29:15,219 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6., storeName=7f9a08e11e132c4f4473bdc5fef699f6/A, priority=13, startTime=1734020954784; duration=0sec 2024-12-12T16:29:15,219 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:15,219 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7f9a08e11e132c4f4473bdc5fef699f6:A 2024-12-12T16:29:15,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-12T16:29:15,244 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:15,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42016 deadline: 1734021015240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:15,244 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:15,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021015240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:15,341 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/fe1c48837006493e9d3878bbfdf538ce 2024-12-12T16:29:15,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/0de5a350decc4b2e8b0ad1c1d8732c95 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/0de5a350decc4b2e8b0ad1c1d8732c95 2024-12-12T16:29:15,348 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/0de5a350decc4b2e8b0ad1c1d8732c95, entries=150, sequenceid=233, filesize=11.9 K 2024-12-12T16:29:15,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/65c3711cb5e045a987dc6fdd941f9def as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/65c3711cb5e045a987dc6fdd941f9def 2024-12-12T16:29:15,352 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/65c3711cb5e045a987dc6fdd941f9def, entries=150, sequenceid=233, filesize=11.9 K 2024-12-12T16:29:15,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/fe1c48837006493e9d3878bbfdf538ce as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/fe1c48837006493e9d3878bbfdf538ce 2024-12-12T16:29:15,355 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/fe1c48837006493e9d3878bbfdf538ce, entries=150, sequenceid=233, filesize=11.9 K 2024-12-12T16:29:15,356 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 7f9a08e11e132c4f4473bdc5fef699f6 in 465ms, sequenceid=233, compaction requested=false 2024-12-12T16:29:15,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2538): Flush status journal for 7f9a08e11e132c4f4473bdc5fef699f6: 2024-12-12T16:29:15,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 
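The FlushRegionCallable / RS_FLUSH_REGIONS activity above is the region-server side of a client-requested table flush; the master side shows up in this log as FlushTableProcedure with one FlushRegionProcedure child per region. A minimal sketch of issuing that request through the Admin API, assuming a reachable cluster configured via hbase-site.xml on the classpath:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Ask the master to flush every region of the table; in the log this
                // surfaces as "Operation: FLUSH, Table Name: default:TestAcidGuarantees".
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }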
2024-12-12T16:29:15,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139 2024-12-12T16:29:15,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4106): Remote procedure done, pid=139 2024-12-12T16:29:15,359 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=138 2024-12-12T16:29:15,359 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2290 sec 2024-12-12T16:29:15,360 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees in 1.2340 sec 2024-12-12T16:29:15,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 7f9a08e11e132c4f4473bdc5fef699f6 2024-12-12T16:29:15,451 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7f9a08e11e132c4f4473bdc5fef699f6 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-12T16:29:15,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=A 2024-12-12T16:29:15,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:15,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=B 2024-12-12T16:29:15,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:15,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=C 2024-12-12T16:29:15,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:15,460 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/a5fa2b12d0ce42faa95fd123ea09b8f0 is 50, key is test_row_0/A:col10/1734020955450/Put/seqid=0 2024-12-12T16:29:15,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742325_1501 (size=12151) 2024-12-12T16:29:15,511 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:15,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42016 deadline: 1734021015507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:15,512 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:15,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021015507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:15,615 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:15,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021015613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:15,615 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:15,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42016 deadline: 1734021015613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:15,819 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:15,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42016 deadline: 1734021015816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:15,819 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:15,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021015817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:15,865 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/a5fa2b12d0ce42faa95fd123ea09b8f0 2024-12-12T16:29:15,871 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/cb558f5b89dd42cea2c45bde5e6c2095 is 50, key is test_row_0/B:col10/1734020955450/Put/seqid=0 2024-12-12T16:29:15,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742326_1502 (size=12151) 2024-12-12T16:29:16,126 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:16,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42016 deadline: 1734021016120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:16,128 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:16,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021016121, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:16,149 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:16,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41956 deadline: 1734021016144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:16,154 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:16,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41966 deadline: 1734021016151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:16,160 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:16,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41942 deadline: 1734021016155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:16,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-12T16:29:16,231 INFO [Thread-2023 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 138 completed 2024-12-12T16:29:16,232 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T16:29:16,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees 2024-12-12T16:29:16,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-12T16:29:16,234 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T16:29:16,234 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T16:29:16,234 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T16:29:16,278 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at 
sequenceid=248 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/cb558f5b89dd42cea2c45bde5e6c2095 2024-12-12T16:29:16,283 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/385eebed687146d784da6d8b8e0c2207 is 50, key is test_row_0/C:col10/1734020955450/Put/seqid=0 2024-12-12T16:29:16,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742327_1503 (size=12151) 2024-12-12T16:29:16,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-12T16:29:16,386 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:16,387 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-12-12T16:29:16,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:16,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. as already flushing 2024-12-12T16:29:16,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:16,387 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:29:16,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:16,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:16,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-12T16:29:16,539 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:16,539 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-12-12T16:29:16,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:16,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. as already flushing 2024-12-12T16:29:16,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:16,540 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:16,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:16,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:16,630 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:16,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42016 deadline: 1734021016628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:16,635 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:16,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021016632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:16,688 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/385eebed687146d784da6d8b8e0c2207 2024-12-12T16:29:16,692 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:16,692 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-12-12T16:29:16,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:16,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. as already flushing 2024-12-12T16:29:16,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:16,693 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/a5fa2b12d0ce42faa95fd123ea09b8f0 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/a5fa2b12d0ce42faa95fd123ea09b8f0 2024-12-12T16:29:16,693 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:16,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:16,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:16,697 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/a5fa2b12d0ce42faa95fd123ea09b8f0, entries=150, sequenceid=248, filesize=11.9 K 2024-12-12T16:29:16,698 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/cb558f5b89dd42cea2c45bde5e6c2095 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/cb558f5b89dd42cea2c45bde5e6c2095 2024-12-12T16:29:16,701 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/cb558f5b89dd42cea2c45bde5e6c2095, entries=150, sequenceid=248, filesize=11.9 K 2024-12-12T16:29:16,701 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/385eebed687146d784da6d8b8e0c2207 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/385eebed687146d784da6d8b8e0c2207 2024-12-12T16:29:16,704 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/385eebed687146d784da6d8b8e0c2207, entries=150, sequenceid=248, filesize=11.9 K 
2024-12-12T16:29:16,705 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 7f9a08e11e132c4f4473bdc5fef699f6 in 1254ms, sequenceid=248, compaction requested=true 2024-12-12T16:29:16,705 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7f9a08e11e132c4f4473bdc5fef699f6: 2024-12-12T16:29:16,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7f9a08e11e132c4f4473bdc5fef699f6:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T16:29:16,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:16,705 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:29:16,705 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:29:16,706 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7f9a08e11e132c4f4473bdc5fef699f6:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T16:29:16,706 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:16,706 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7f9a08e11e132c4f4473bdc5fef699f6:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T16:29:16,706 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:29:16,707 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:29:16,707 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:29:16,707 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): 7f9a08e11e132c4f4473bdc5fef699f6/A is initiating minor compaction (all files) 2024-12-12T16:29:16,707 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): 7f9a08e11e132c4f4473bdc5fef699f6/B is initiating minor compaction (all files) 2024-12-12T16:29:16,707 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7f9a08e11e132c4f4473bdc5fef699f6/A in TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:16,707 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7f9a08e11e132c4f4473bdc5fef699f6/B in TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 
2024-12-12T16:29:16,707 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/bef2065021204e0e9a1ba762d7d1b180, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/0de5a350decc4b2e8b0ad1c1d8732c95, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/a5fa2b12d0ce42faa95fd123ea09b8f0] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp, totalSize=36.1 K 2024-12-12T16:29:16,707 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/4234b091fc9c460598a64a3c9212b8fd, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/65c3711cb5e045a987dc6fdd941f9def, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/cb558f5b89dd42cea2c45bde5e6c2095] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp, totalSize=36.1 K 2024-12-12T16:29:16,708 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 4234b091fc9c460598a64a3c9212b8fd, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1734020952808 2024-12-12T16:29:16,708 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting bef2065021204e0e9a1ba762d7d1b180, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1734020952808 2024-12-12T16:29:16,708 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0de5a350decc4b2e8b0ad1c1d8732c95, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1734020953999 2024-12-12T16:29:16,708 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 65c3711cb5e045a987dc6fdd941f9def, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1734020953999 2024-12-12T16:29:16,708 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting a5fa2b12d0ce42faa95fd123ea09b8f0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1734020955133 2024-12-12T16:29:16,708 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting cb558f5b89dd42cea2c45bde5e6c2095, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1734020955133 2024-12-12T16:29:16,714 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7f9a08e11e132c4f4473bdc5fef699f6#A#compaction#428 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:29:16,714 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/df8d4e635d16444bb658294d49a4cf83 is 50, key is test_row_0/A:col10/1734020955450/Put/seqid=0 2024-12-12T16:29:16,718 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7f9a08e11e132c4f4473bdc5fef699f6#B#compaction#429 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:29:16,719 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/02570d285f264873abef038b5003dd19 is 50, key is test_row_0/B:col10/1734020955450/Put/seqid=0 2024-12-12T16:29:16,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742328_1504 (size=12765) 2024-12-12T16:29:16,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742329_1505 (size=12765) 2024-12-12T16:29:16,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-12T16:29:16,845 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:16,845 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-12-12T16:29:16,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 
2024-12-12T16:29:16,846 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2837): Flushing 7f9a08e11e132c4f4473bdc5fef699f6 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-12T16:29:16,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=A 2024-12-12T16:29:16,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:16,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=B 2024-12-12T16:29:16,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:16,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=C 2024-12-12T16:29:16,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:16,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/f430940484c54fbcbb63db574ef0f705 is 50, key is test_row_0/A:col10/1734020955496/Put/seqid=0 2024-12-12T16:29:16,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742330_1506 (size=12301) 2024-12-12T16:29:17,140 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/df8d4e635d16444bb658294d49a4cf83 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/df8d4e635d16444bb658294d49a4cf83 2024-12-12T16:29:17,142 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/02570d285f264873abef038b5003dd19 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/02570d285f264873abef038b5003dd19 2024-12-12T16:29:17,145 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7f9a08e11e132c4f4473bdc5fef699f6/A of 7f9a08e11e132c4f4473bdc5fef699f6 into df8d4e635d16444bb658294d49a4cf83(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T16:29:17,145 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7f9a08e11e132c4f4473bdc5fef699f6: 2024-12-12T16:29:17,145 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6., storeName=7f9a08e11e132c4f4473bdc5fef699f6/A, priority=13, startTime=1734020956705; duration=0sec 2024-12-12T16:29:17,145 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:29:17,145 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7f9a08e11e132c4f4473bdc5fef699f6:A 2024-12-12T16:29:17,145 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:29:17,146 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:29:17,146 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): 7f9a08e11e132c4f4473bdc5fef699f6/C is initiating minor compaction (all files) 2024-12-12T16:29:17,146 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7f9a08e11e132c4f4473bdc5fef699f6/C in TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:17,146 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/8356c5ba0fa5497fb07b710d4de935a5, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/fe1c48837006493e9d3878bbfdf538ce, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/385eebed687146d784da6d8b8e0c2207] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp, totalSize=36.1 K 2024-12-12T16:29:17,147 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8356c5ba0fa5497fb07b710d4de935a5, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1734020952808 2024-12-12T16:29:17,147 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7f9a08e11e132c4f4473bdc5fef699f6/B of 7f9a08e11e132c4f4473bdc5fef699f6 into 02570d285f264873abef038b5003dd19(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T16:29:17,147 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7f9a08e11e132c4f4473bdc5fef699f6: 2024-12-12T16:29:17,147 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6., storeName=7f9a08e11e132c4f4473bdc5fef699f6/B, priority=13, startTime=1734020956705; duration=0sec 2024-12-12T16:29:17,147 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:17,148 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7f9a08e11e132c4f4473bdc5fef699f6:B 2024-12-12T16:29:17,148 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting fe1c48837006493e9d3878bbfdf538ce, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1734020953999 2024-12-12T16:29:17,148 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 385eebed687146d784da6d8b8e0c2207, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1734020955133 2024-12-12T16:29:17,155 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7f9a08e11e132c4f4473bdc5fef699f6#C#compaction#431 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:29:17,155 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/eb689b7596e74eb7a7dff667b41b46e9 is 50, key is test_row_0/C:col10/1734020955450/Put/seqid=0 2024-12-12T16:29:17,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742331_1507 (size=12731) 2024-12-12T16:29:17,164 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/eb689b7596e74eb7a7dff667b41b46e9 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/eb689b7596e74eb7a7dff667b41b46e9 2024-12-12T16:29:17,168 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7f9a08e11e132c4f4473bdc5fef699f6/C of 7f9a08e11e132c4f4473bdc5fef699f6 into eb689b7596e74eb7a7dff667b41b46e9(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T16:29:17,168 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7f9a08e11e132c4f4473bdc5fef699f6: 2024-12-12T16:29:17,168 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6., storeName=7f9a08e11e132c4f4473bdc5fef699f6/C, priority=13, startTime=1734020956706; duration=0sec 2024-12-12T16:29:17,168 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:17,168 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7f9a08e11e132c4f4473bdc5fef699f6:C 2024-12-12T16:29:17,255 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=270 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/f430940484c54fbcbb63db574ef0f705 2024-12-12T16:29:17,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/88c05bc586b84d3690901211ca8afb15 is 50, key is test_row_0/B:col10/1734020955496/Put/seqid=0 2024-12-12T16:29:17,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742332_1508 (size=12301) 2024-12-12T16:29:17,270 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=270 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/88c05bc586b84d3690901211ca8afb15 2024-12-12T16:29:17,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/2c14cea1133c4950a50e3f0e76876e68 is 50, key is test_row_0/C:col10/1734020955496/Put/seqid=0 2024-12-12T16:29:17,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742333_1509 (size=12301) 2024-12-12T16:29:17,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-12T16:29:17,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 7f9a08e11e132c4f4473bdc5fef699f6 2024-12-12T16:29:17,637 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 
as already flushing 2024-12-12T16:29:17,681 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=270 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/2c14cea1133c4950a50e3f0e76876e68 2024-12-12T16:29:17,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/f430940484c54fbcbb63db574ef0f705 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/f430940484c54fbcbb63db574ef0f705 2024-12-12T16:29:17,687 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/f430940484c54fbcbb63db574ef0f705, entries=150, sequenceid=270, filesize=12.0 K 2024-12-12T16:29:17,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/88c05bc586b84d3690901211ca8afb15 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/88c05bc586b84d3690901211ca8afb15 2024-12-12T16:29:17,690 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/88c05bc586b84d3690901211ca8afb15, entries=150, sequenceid=270, filesize=12.0 K 2024-12-12T16:29:17,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/2c14cea1133c4950a50e3f0e76876e68 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/2c14cea1133c4950a50e3f0e76876e68 2024-12-12T16:29:17,694 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/2c14cea1133c4950a50e3f0e76876e68, entries=150, sequenceid=270, filesize=12.0 K 2024-12-12T16:29:17,695 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 7f9a08e11e132c4f4473bdc5fef699f6 
in 849ms, sequenceid=270, compaction requested=false 2024-12-12T16:29:17,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2538): Flush status journal for 7f9a08e11e132c4f4473bdc5fef699f6: 2024-12-12T16:29:17,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:17,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-12-12T16:29:17,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4106): Remote procedure done, pid=141 2024-12-12T16:29:17,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 7f9a08e11e132c4f4473bdc5fef699f6 2024-12-12T16:29:17,696 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7f9a08e11e132c4f4473bdc5fef699f6 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-12T16:29:17,696 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=A 2024-12-12T16:29:17,696 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:17,696 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=B 2024-12-12T16:29:17,697 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:17,697 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=C 2024-12-12T16:29:17,697 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:17,698 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=141, resume processing ppid=140 2024-12-12T16:29:17,698 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, ppid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4620 sec 2024-12-12T16:29:17,700 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees in 1.4660 sec 2024-12-12T16:29:17,702 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/6b7e90602482454cbd3ef829869d0e4c is 50, key is test_row_0/A:col10/1734020957656/Put/seqid=0 2024-12-12T16:29:17,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742334_1510 (size=17181) 2024-12-12T16:29:17,716 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=288 (bloomFilter=true), 
to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/6b7e90602482454cbd3ef829869d0e4c 2024-12-12T16:29:17,723 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/d017e80487c14410aa287adde1be67bd is 50, key is test_row_0/B:col10/1734020957656/Put/seqid=0 2024-12-12T16:29:17,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742335_1511 (size=12301) 2024-12-12T16:29:17,748 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:17,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021017744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:17,753 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:17,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42016 deadline: 1734021017748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:17,808 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-12T16:29:17,851 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:17,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021017850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:17,856 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:17,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42016 deadline: 1734021017854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:18,055 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:18,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021018052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:18,061 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:18,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42016 deadline: 1734021018057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:18,142 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/d017e80487c14410aa287adde1be67bd 2024-12-12T16:29:18,148 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/7c264fb2514241cf938ef9d1c1d92d3d is 50, key is test_row_0/C:col10/1734020957656/Put/seqid=0 2024-12-12T16:29:18,158 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:18,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41956 deadline: 1734021018155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:18,159 DEBUG [Thread-2021 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4156 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6., hostname=4f6a4780a2f6,41933,1734020809476, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T16:29:18,164 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:18,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41966 deadline: 1734021018160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:18,165 DEBUG [Thread-2019 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4154 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6., hostname=4f6a4780a2f6,41933,1734020809476, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T16:29:18,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742336_1512 (size=12301) 2024-12-12T16:29:18,169 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/7c264fb2514241cf938ef9d1c1d92d3d 2024-12-12T16:29:18,173 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/6b7e90602482454cbd3ef829869d0e4c as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/6b7e90602482454cbd3ef829869d0e4c 2024-12-12T16:29:18,174 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:18,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41942 deadline: 1734021018168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:18,175 DEBUG [Thread-2017 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4165 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6., hostname=4f6a4780a2f6,41933,1734020809476, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at 
org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T16:29:18,177 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/6b7e90602482454cbd3ef829869d0e4c, entries=250, sequenceid=288, filesize=16.8 K 2024-12-12T16:29:18,178 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/d017e80487c14410aa287adde1be67bd as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/d017e80487c14410aa287adde1be67bd 2024-12-12T16:29:18,181 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/d017e80487c14410aa287adde1be67bd, entries=150, sequenceid=288, filesize=12.0 K 2024-12-12T16:29:18,181 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/7c264fb2514241cf938ef9d1c1d92d3d as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/7c264fb2514241cf938ef9d1c1d92d3d 2024-12-12T16:29:18,184 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/7c264fb2514241cf938ef9d1c1d92d3d, entries=150, sequenceid=288, filesize=12.0 K 2024-12-12T16:29:18,185 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 7f9a08e11e132c4f4473bdc5fef699f6 in 489ms, sequenceid=288, compaction requested=true 2024-12-12T16:29:18,185 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7f9a08e11e132c4f4473bdc5fef699f6: 
2024-12-12T16:29:18,185 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7f9a08e11e132c4f4473bdc5fef699f6:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T16:29:18,185 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:18,185 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:29:18,185 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7f9a08e11e132c4f4473bdc5fef699f6:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T16:29:18,186 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:18,186 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7f9a08e11e132c4f4473bdc5fef699f6:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T16:29:18,186 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:29:18,186 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:29:18,186 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42247 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:29:18,186 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): 7f9a08e11e132c4f4473bdc5fef699f6/A is initiating minor compaction (all files) 2024-12-12T16:29:18,186 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37367 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:29:18,187 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7f9a08e11e132c4f4473bdc5fef699f6/A in TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 
2024-12-12T16:29:18,187 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): 7f9a08e11e132c4f4473bdc5fef699f6/B is initiating minor compaction (all files) 2024-12-12T16:29:18,187 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/df8d4e635d16444bb658294d49a4cf83, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/f430940484c54fbcbb63db574ef0f705, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/6b7e90602482454cbd3ef829869d0e4c] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp, totalSize=41.3 K 2024-12-12T16:29:18,187 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7f9a08e11e132c4f4473bdc5fef699f6/B in TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:18,187 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/02570d285f264873abef038b5003dd19, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/88c05bc586b84d3690901211ca8afb15, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/d017e80487c14410aa287adde1be67bd] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp, totalSize=36.5 K 2024-12-12T16:29:18,187 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting df8d4e635d16444bb658294d49a4cf83, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1734020955133 2024-12-12T16:29:18,187 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 02570d285f264873abef038b5003dd19, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1734020955133 2024-12-12T16:29:18,187 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting f430940484c54fbcbb63db574ef0f705, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1734020955496 2024-12-12T16:29:18,187 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 88c05bc586b84d3690901211ca8afb15, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1734020955496 2024-12-12T16:29:18,187 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6b7e90602482454cbd3ef829869d0e4c, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1734020957641 2024-12-12T16:29:18,188 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] 
compactions.Compactor(224): Compacting d017e80487c14410aa287adde1be67bd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1734020957654 2024-12-12T16:29:18,194 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7f9a08e11e132c4f4473bdc5fef699f6#A#compaction#437 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:29:18,194 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/f25d3215b8a84be4907a043a6516f3f8 is 50, key is test_row_0/A:col10/1734020957656/Put/seqid=0 2024-12-12T16:29:18,194 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7f9a08e11e132c4f4473bdc5fef699f6#B#compaction#438 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:29:18,195 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/574e65705c014b1d98f7faf354674d02 is 50, key is test_row_0/B:col10/1734020957656/Put/seqid=0 2024-12-12T16:29:18,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742338_1514 (size=13017) 2024-12-12T16:29:18,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742337_1513 (size=13017) 2024-12-12T16:29:18,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-12T16:29:18,337 INFO [Thread-2023 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 140 completed 2024-12-12T16:29:18,339 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T16:29:18,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees 2024-12-12T16:29:18,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-12T16:29:18,340 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T16:29:18,341 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T16:29:18,341 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=143, 
ppid=142, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T16:29:18,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 7f9a08e11e132c4f4473bdc5fef699f6 2024-12-12T16:29:18,361 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7f9a08e11e132c4f4473bdc5fef699f6 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-12T16:29:18,362 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=A 2024-12-12T16:29:18,362 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:18,362 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=B 2024-12-12T16:29:18,362 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:18,362 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=C 2024-12-12T16:29:18,362 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:18,366 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/0dca8cac80334dd38b0fe00c19ebd900 is 50, key is test_row_0/A:col10/1734020957735/Put/seqid=0 2024-12-12T16:29:18,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742339_1515 (size=14741) 2024-12-12T16:29:18,403 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:18,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021018396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:18,408 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:18,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42016 deadline: 1734021018403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:18,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-12T16:29:18,492 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:18,493 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-12-12T16:29:18,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:18,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. as already flushing 2024-12-12T16:29:18,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:18,493 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
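The entries above record the test client driving table flushes through the admin API: procedure 140 completes (Operation: FLUSH, Table Name: default:TestAcidGuarantees), a new FlushTableProcedure (pid=142) is stored right away, and the region server answers the remote FlushRegionCallable with "NOT flushing ... as already flushing" until the in-flight flush drains. A minimal sketch of issuing such a flush from client code, assuming only the standard HBase 2.x client API (the test's own driver code is not part of this log), is:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Requests a flush of all column families of the table; on this build the
          // master runs it as a FlushTableProcedure with per-region
          // FlushRegionProcedure children, which show up as pid=142/143 above.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }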
2024-12-12T16:29:18,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:18,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:18,511 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:18,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021018504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:18,513 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:18,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42016 deadline: 1734021018509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:18,623 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/574e65705c014b1d98f7faf354674d02 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/574e65705c014b1d98f7faf354674d02 2024-12-12T16:29:18,624 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/f25d3215b8a84be4907a043a6516f3f8 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/f25d3215b8a84be4907a043a6516f3f8 2024-12-12T16:29:18,627 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7f9a08e11e132c4f4473bdc5fef699f6/B of 7f9a08e11e132c4f4473bdc5fef699f6 into 574e65705c014b1d98f7faf354674d02(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T16:29:18,627 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7f9a08e11e132c4f4473bdc5fef699f6: 2024-12-12T16:29:18,627 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6., storeName=7f9a08e11e132c4f4473bdc5fef699f6/B, priority=13, startTime=1734020958185; duration=0sec 2024-12-12T16:29:18,627 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:29:18,628 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7f9a08e11e132c4f4473bdc5fef699f6:B 2024-12-12T16:29:18,628 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:29:18,628 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7f9a08e11e132c4f4473bdc5fef699f6/A of 7f9a08e11e132c4f4473bdc5fef699f6 into f25d3215b8a84be4907a043a6516f3f8(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:29:18,629 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7f9a08e11e132c4f4473bdc5fef699f6: 2024-12-12T16:29:18,629 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6., storeName=7f9a08e11e132c4f4473bdc5fef699f6/A, priority=13, startTime=1734020958185; duration=0sec 2024-12-12T16:29:18,629 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:18,629 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7f9a08e11e132c4f4473bdc5fef699f6:A 2024-12-12T16:29:18,629 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:29:18,629 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): 7f9a08e11e132c4f4473bdc5fef699f6/C is initiating minor compaction (all files) 2024-12-12T16:29:18,629 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7f9a08e11e132c4f4473bdc5fef699f6/C in TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 
2024-12-12T16:29:18,629 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/eb689b7596e74eb7a7dff667b41b46e9, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/2c14cea1133c4950a50e3f0e76876e68, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/7c264fb2514241cf938ef9d1c1d92d3d] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp, totalSize=36.5 K 2024-12-12T16:29:18,630 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting eb689b7596e74eb7a7dff667b41b46e9, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1734020955133 2024-12-12T16:29:18,630 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 2c14cea1133c4950a50e3f0e76876e68, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1734020955496 2024-12-12T16:29:18,630 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 7c264fb2514241cf938ef9d1c1d92d3d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1734020957654 2024-12-12T16:29:18,635 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7f9a08e11e132c4f4473bdc5fef699f6#C#compaction#440 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:29:18,636 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/bcc6d544e2e3488ba6291f572e27aa5c is 50, key is test_row_0/C:col10/1734020957656/Put/seqid=0 2024-12-12T16:29:18,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742340_1516 (size=12983) 2024-12-12T16:29:18,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-12T16:29:18,645 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/bcc6d544e2e3488ba6291f572e27aa5c as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/bcc6d544e2e3488ba6291f572e27aa5c 2024-12-12T16:29:18,645 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:18,645 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-12-12T16:29:18,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:18,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. as already flushing 2024-12-12T16:29:18,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:18,646 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:18,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:18,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:18,650 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7f9a08e11e132c4f4473bdc5fef699f6/C of 7f9a08e11e132c4f4473bdc5fef699f6 into bcc6d544e2e3488ba6291f572e27aa5c(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:29:18,650 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7f9a08e11e132c4f4473bdc5fef699f6: 2024-12-12T16:29:18,650 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6., storeName=7f9a08e11e132c4f4473bdc5fef699f6/C, priority=13, startTime=1734020958186; duration=0sec 2024-12-12T16:29:18,651 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:18,651 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7f9a08e11e132c4f4473bdc5fef699f6:C 2024-12-12T16:29:18,717 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:18,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021018713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:18,719 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:18,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42016 deadline: 1734021018715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:18,771 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=310 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/0dca8cac80334dd38b0fe00c19ebd900 2024-12-12T16:29:18,777 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/6db266ee134d4ed7936c6fb103c60f24 is 50, key is test_row_0/B:col10/1734020957735/Put/seqid=0 2024-12-12T16:29:18,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742341_1517 (size=12301) 2024-12-12T16:29:18,788 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=310 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/6db266ee134d4ed7936c6fb103c60f24 2024-12-12T16:29:18,799 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:18,800 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-12-12T16:29:18,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:18,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. as already flushing 2024-12-12T16:29:18,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 
2024-12-12T16:29:18,800 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:18,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:18,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:18,816 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/9f27c6b14f384a188ed4866f90d5f3e9 is 50, key is test_row_0/C:col10/1734020957735/Put/seqid=0 2024-12-12T16:29:18,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742342_1518 (size=12301) 2024-12-12T16:29:18,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-12T16:29:18,956 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:18,957 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-12-12T16:29:18,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:18,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. as already flushing 2024-12-12T16:29:18,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 
2024-12-12T16:29:18,957 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:18,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:18,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:19,022 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:19,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021019018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:19,024 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:19,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42016 deadline: 1734021019020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:19,109 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:19,109 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-12-12T16:29:19,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 
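The repeated RegionTooBusyException warnings above ("Over memstore limit=512.0 K") record writes being rejected while the region's memstore is over its blocking limit and the flush has not yet drained it. The sketch below only illustrates that failure mode from the client side with a hypothetical retry helper; it is not the test's own writer, and in practice the HBase client performs its own internal retries, so the exception may surface wrapped rather than directly.

    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    // Hypothetical helper: retry a put with simple backoff when the region reports
    // it is over its memstore limit, as in the warnings logged above.
    public final class BusyRetryingWriter {
      public static void putWithBackoff(Connection conn, Put put) throws Exception {
        try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          long backoffMs = 100;
          for (int attempt = 0; attempt < 5; attempt++) {
            try {
              table.put(put);
              return;
            } catch (RegionTooBusyException busy) {
              // Memstore above limit; wait for the in-flight flush, then retry.
              Thread.sleep(backoffMs);
              backoffMs *= 2;
            }
          }
          throw new Exception("region still too busy after retries");
        }
      }

      public static Put exampleRow() {
        // Row and column taken from the keys visible in the log (test_row_0, A:col10).
        return new Put(Bytes.toBytes("test_row_0"))
            .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      }
    }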
2024-12-12T16:29:19,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. as already flushing 2024-12-12T16:29:19,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:19,110 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:19,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:29:19,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:29:19,246 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=310 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/9f27c6b14f384a188ed4866f90d5f3e9 2024-12-12T16:29:19,250 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/0dca8cac80334dd38b0fe00c19ebd900 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/0dca8cac80334dd38b0fe00c19ebd900 2024-12-12T16:29:19,253 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/0dca8cac80334dd38b0fe00c19ebd900, entries=200, sequenceid=310, filesize=14.4 K 2024-12-12T16:29:19,254 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/6db266ee134d4ed7936c6fb103c60f24 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/6db266ee134d4ed7936c6fb103c60f24 2024-12-12T16:29:19,257 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/6db266ee134d4ed7936c6fb103c60f24, entries=150, sequenceid=310, filesize=12.0 K 2024-12-12T16:29:19,258 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/9f27c6b14f384a188ed4866f90d5f3e9 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/9f27c6b14f384a188ed4866f90d5f3e9 2024-12-12T16:29:19,261 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/9f27c6b14f384a188ed4866f90d5f3e9, entries=150, sequenceid=310, filesize=12.0 K 2024-12-12T16:29:19,262 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:19,262 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-12-12T16:29:19,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 
2024-12-12T16:29:19,263 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 7f9a08e11e132c4f4473bdc5fef699f6 in 901ms, sequenceid=310, compaction requested=false 2024-12-12T16:29:19,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. as already flushing 2024-12-12T16:29:19,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:19,263 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7f9a08e11e132c4f4473bdc5fef699f6: 2024-12-12T16:29:19,263 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:19,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:29:19,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:19,415 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:19,415 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-12-12T16:29:19,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 
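The repeated dispatches of pid=143 above are the region-server side of a client-requested table flush (the `Operation: FLUSH, Table Name: default:TestAcidGuarantees` admin call visible later in this log). The callable fails with "Unable to complete flush" whenever the region is already flushing on its own, and the master simply re-dispatches the procedure until it succeeds. A minimal sketch of how such a flush is issued through the client API, assuming a reachable cluster and an `hbase-site.xml` on the classpath; this is illustrative, not the test's own code:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class FlushTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Asks the master to flush every region of the table; the master runs a
            // FlushTableProcedure and dispatches FlushRegionProcedure sub-procedures
            // to the region servers, as seen in this log.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```

`Admin.flush` drives the FlushTableProcedure on the master, whose per-region FlushRegionProcedure sub-procedures are the `FlushRegionCallable` executions logged here.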
2024-12-12T16:29:19,415 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2837): Flushing 7f9a08e11e132c4f4473bdc5fef699f6 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-12T16:29:19,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=A 2024-12-12T16:29:19,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:19,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=B 2024-12-12T16:29:19,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:19,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=C 2024-12-12T16:29:19,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:19,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/bc55f306090c4a929504ae206f962e32 is 50, key is test_row_0/A:col10/1734020958394/Put/seqid=0 2024-12-12T16:29:19,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742343_1519 (size=12301) 2024-12-12T16:29:19,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-12T16:29:19,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 7f9a08e11e132c4f4473bdc5fef699f6 2024-12-12T16:29:19,528 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. as already flushing 2024-12-12T16:29:19,594 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:19,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021019590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:19,599 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:19,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42016 deadline: 1734021019594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:19,700 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:19,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021019695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:19,703 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:19,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42016 deadline: 1734021019700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:19,825 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=327 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/bc55f306090c4a929504ae206f962e32 2024-12-12T16:29:19,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/92dda5b8cc50420dac0fe372150390f9 is 50, key is test_row_0/B:col10/1734020958394/Put/seqid=0 2024-12-12T16:29:19,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742344_1520 (size=12301) 2024-12-12T16:29:19,838 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=327 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/92dda5b8cc50420dac0fe372150390f9 2024-12-12T16:29:19,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/31ef6ed03ac04ddc8182f00996b87a69 is 50, key is test_row_0/C:col10/1734020958394/Put/seqid=0 2024-12-12T16:29:19,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742345_1521 (size=12301) 2024-12-12T16:29:19,850 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=327 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/31ef6ed03ac04ddc8182f00996b87a69 2024-12-12T16:29:19,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/bc55f306090c4a929504ae206f962e32 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/bc55f306090c4a929504ae206f962e32 2024-12-12T16:29:19,857 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/bc55f306090c4a929504ae206f962e32, entries=150, sequenceid=327, filesize=12.0 K 2024-12-12T16:29:19,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/92dda5b8cc50420dac0fe372150390f9 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/92dda5b8cc50420dac0fe372150390f9 2024-12-12T16:29:19,861 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/92dda5b8cc50420dac0fe372150390f9, entries=150, sequenceid=327, filesize=12.0 K 2024-12-12T16:29:19,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/31ef6ed03ac04ddc8182f00996b87a69 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/31ef6ed03ac04ddc8182f00996b87a69 2024-12-12T16:29:19,865 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/31ef6ed03ac04ddc8182f00996b87a69, entries=150, sequenceid=327, filesize=12.0 K 2024-12-12T16:29:19,865 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 7f9a08e11e132c4f4473bdc5fef699f6 in 450ms, sequenceid=327, compaction requested=true 2024-12-12T16:29:19,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2538): Flush status journal for 7f9a08e11e132c4f4473bdc5fef699f6: 2024-12-12T16:29:19,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 
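The RegionTooBusyException warnings interleaved with this flush come from `HRegion.checkResources` rejecting Mutate calls while the region's memstore is above its blocking size (512.0 K here, evidently configured far below the production default so the test can exercise backpressure). Clients are expected to back off and retry until a flush drains the memstore. A minimal client-side sketch of the standard retry/pause settings, with illustrative values rather than the ones this test uses:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class ClientBackoffConfig {
    // Client configuration that tolerates transient RegionTooBusyException
    // rejections by retrying with a growing pause. Values are illustrative.
    public static Configuration create() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.client.retries.number", 15);          // attempts per operation
        conf.setLong("hbase.client.pause", 100);                 // base pause in ms
        conf.setInt("hbase.client.operation.timeout", 120_000);  // overall budget in ms
        return conf;
    }
}
```

The blocking size itself is a server-side setting (the memstore flush size multiplied by `hbase.hregion.memstore.block.multiplier`), so raising client retries only hides sustained overload rather than removing it.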
2024-12-12T16:29:19,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=143 2024-12-12T16:29:19,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4106): Remote procedure done, pid=143 2024-12-12T16:29:19,867 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=143, resume processing ppid=142 2024-12-12T16:29:19,867 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, ppid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5250 sec 2024-12-12T16:29:19,868 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees in 1.5290 sec 2024-12-12T16:29:19,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 7f9a08e11e132c4f4473bdc5fef699f6 2024-12-12T16:29:19,909 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7f9a08e11e132c4f4473bdc5fef699f6 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-12T16:29:19,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=A 2024-12-12T16:29:19,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:19,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=B 2024-12-12T16:29:19,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:19,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=C 2024-12-12T16:29:19,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:19,913 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/6a53e54197de4ba7a008327c99d11508 is 50, key is test_row_0/A:col10/1734020959909/Put/seqid=0 2024-12-12T16:29:19,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742346_1522 (size=14741) 2024-12-12T16:29:19,918 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=351 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/6a53e54197de4ba7a008327c99d11508 2024-12-12T16:29:19,925 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/adbee3a52f504d69bcba55d3d63cb1fe is 50, key is test_row_0/B:col10/1734020959909/Put/seqid=0 2024-12-12T16:29:19,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to 
blk_1073742347_1523 (size=12301) 2024-12-12T16:29:19,947 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:19,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42016 deadline: 1734021019938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:19,949 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:19,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021019947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:20,051 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:20,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42016 deadline: 1734021020048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:20,053 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:20,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021020050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:20,257 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:20,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42016 deadline: 1734021020253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:20,257 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:20,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021020255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:20,346 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=351 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/adbee3a52f504d69bcba55d3d63cb1fe 2024-12-12T16:29:20,354 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/23a5e7e347da46dc925f4d75996d9972 is 50, key is test_row_0/C:col10/1734020959909/Put/seqid=0 2024-12-12T16:29:20,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742348_1524 (size=12301) 2024-12-12T16:29:20,362 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=351 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/23a5e7e347da46dc925f4d75996d9972 2024-12-12T16:29:20,366 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/6a53e54197de4ba7a008327c99d11508 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/6a53e54197de4ba7a008327c99d11508 2024-12-12T16:29:20,370 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/6a53e54197de4ba7a008327c99d11508, entries=200, sequenceid=351, filesize=14.4 K 2024-12-12T16:29:20,370 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/adbee3a52f504d69bcba55d3d63cb1fe as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/adbee3a52f504d69bcba55d3d63cb1fe 2024-12-12T16:29:20,373 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/adbee3a52f504d69bcba55d3d63cb1fe, entries=150, sequenceid=351, filesize=12.0 K 2024-12-12T16:29:20,374 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/23a5e7e347da46dc925f4d75996d9972 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/23a5e7e347da46dc925f4d75996d9972 2024-12-12T16:29:20,378 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/23a5e7e347da46dc925f4d75996d9972, entries=150, sequenceid=351, filesize=12.0 K 2024-12-12T16:29:20,379 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 7f9a08e11e132c4f4473bdc5fef699f6 in 470ms, sequenceid=351, compaction requested=true 2024-12-12T16:29:20,379 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7f9a08e11e132c4f4473bdc5fef699f6: 2024-12-12T16:29:20,379 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7f9a08e11e132c4f4473bdc5fef699f6:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T16:29:20,379 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:20,379 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T16:29:20,379 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T16:29:20,379 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7f9a08e11e132c4f4473bdc5fef699f6:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T16:29:20,379 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:20,379 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store 7f9a08e11e132c4f4473bdc5fef699f6:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T16:29:20,379 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:29:20,380 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 54800 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T16:29:20,380 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): 7f9a08e11e132c4f4473bdc5fef699f6/A is initiating minor compaction (all files) 2024-12-12T16:29:20,380 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7f9a08e11e132c4f4473bdc5fef699f6/A in TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:20,380 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/f25d3215b8a84be4907a043a6516f3f8, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/0dca8cac80334dd38b0fe00c19ebd900, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/bc55f306090c4a929504ae206f962e32, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/6a53e54197de4ba7a008327c99d11508] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp, totalSize=53.5 K 2024-12-12T16:29:20,381 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting f25d3215b8a84be4907a043a6516f3f8, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1734020957654 2024-12-12T16:29:20,381 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49920 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T16:29:20,381 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): 7f9a08e11e132c4f4473bdc5fef699f6/B is initiating minor compaction (all files) 2024-12-12T16:29:20,381 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7f9a08e11e132c4f4473bdc5fef699f6/B in TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 
2024-12-12T16:29:20,381 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/574e65705c014b1d98f7faf354674d02, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/6db266ee134d4ed7936c6fb103c60f24, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/92dda5b8cc50420dac0fe372150390f9, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/adbee3a52f504d69bcba55d3d63cb1fe] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp, totalSize=48.8 K 2024-12-12T16:29:20,381 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 574e65705c014b1d98f7faf354674d02, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1734020957654 2024-12-12T16:29:20,381 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0dca8cac80334dd38b0fe00c19ebd900, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=310, earliestPutTs=1734020957735 2024-12-12T16:29:20,382 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 6db266ee134d4ed7936c6fb103c60f24, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=310, earliestPutTs=1734020957735 2024-12-12T16:29:20,382 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting bc55f306090c4a929504ae206f962e32, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=327, earliestPutTs=1734020958388 2024-12-12T16:29:20,382 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 92dda5b8cc50420dac0fe372150390f9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=327, earliestPutTs=1734020958388 2024-12-12T16:29:20,382 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6a53e54197de4ba7a008327c99d11508, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=351, earliestPutTs=1734020959583 2024-12-12T16:29:20,382 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting adbee3a52f504d69bcba55d3d63cb1fe, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=351, earliestPutTs=1734020959583 2024-12-12T16:29:20,392 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7f9a08e11e132c4f4473bdc5fef699f6#A#compaction#449 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:29:20,392 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/f59f5d6eb82646f0ac84b6b11fa3f60c is 50, key is test_row_0/A:col10/1734020959909/Put/seqid=0 2024-12-12T16:29:20,394 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7f9a08e11e132c4f4473bdc5fef699f6#B#compaction#450 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:29:20,395 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/c94ebf30b9254adf8886fedee04d6716 is 50, key is test_row_0/B:col10/1734020959909/Put/seqid=0 2024-12-12T16:29:20,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742350_1526 (size=13153) 2024-12-12T16:29:20,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742349_1525 (size=13153) 2024-12-12T16:29:20,418 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/f59f5d6eb82646f0ac84b6b11fa3f60c as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/f59f5d6eb82646f0ac84b6b11fa3f60c 2024-12-12T16:29:20,424 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7f9a08e11e132c4f4473bdc5fef699f6/A of 7f9a08e11e132c4f4473bdc5fef699f6 into f59f5d6eb82646f0ac84b6b11fa3f60c(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
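The two compaction threads above are running ExploringCompactionPolicy minor compactions, each merging a family's four eligible store files into one, throttled by the PressureAwareThroughputController (50.00 MB/second limit). Compactions can also be requested explicitly through the Admin API; a brief sketch, again assuming a reachable cluster, with `hbase.hstore.compaction.min` shown at its usual default rather than a value taken from this test:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public final class CompactStoreExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Minimum number of eligible files before a minor compaction is considered;
        // 3 is the stock default, shown here only to name the knob.
        conf.setInt("hbase.hstore.compaction.min", 3);
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            // Request a compaction of the "A" store only, analogous to the
            // per-family selections above.
            admin.compact(table, Bytes.toBytes("A"));
        }
    }
}
```

A compaction requested this way is typically logged as user-requested rather than the `system` requests triggered by MemStoreFlusher.0 above.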
2024-12-12T16:29:20,424 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7f9a08e11e132c4f4473bdc5fef699f6: 2024-12-12T16:29:20,424 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6., storeName=7f9a08e11e132c4f4473bdc5fef699f6/A, priority=12, startTime=1734020960379; duration=0sec 2024-12-12T16:29:20,424 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:29:20,424 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7f9a08e11e132c4f4473bdc5fef699f6:A 2024-12-12T16:29:20,425 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T16:29:20,426 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49886 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T16:29:20,426 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): 7f9a08e11e132c4f4473bdc5fef699f6/C is initiating minor compaction (all files) 2024-12-12T16:29:20,426 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7f9a08e11e132c4f4473bdc5fef699f6/C in TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:20,426 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/bcc6d544e2e3488ba6291f572e27aa5c, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/9f27c6b14f384a188ed4866f90d5f3e9, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/31ef6ed03ac04ddc8182f00996b87a69, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/23a5e7e347da46dc925f4d75996d9972] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp, totalSize=48.7 K 2024-12-12T16:29:20,427 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting bcc6d544e2e3488ba6291f572e27aa5c, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1734020957654 2024-12-12T16:29:20,427 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9f27c6b14f384a188ed4866f90d5f3e9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=310, earliestPutTs=1734020957735 2024-12-12T16:29:20,427 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 31ef6ed03ac04ddc8182f00996b87a69, keycount=150, bloomtype=ROW, size=12.0 K, 
encoding=NONE, compression=NONE, seqNum=327, earliestPutTs=1734020958388 2024-12-12T16:29:20,427 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 23a5e7e347da46dc925f4d75996d9972, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=351, earliestPutTs=1734020959583 2024-12-12T16:29:20,437 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7f9a08e11e132c4f4473bdc5fef699f6#C#compaction#451 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:29:20,438 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/8c2b919144af4e139511bbc6b95824df is 50, key is test_row_0/C:col10/1734020959909/Put/seqid=0 2024-12-12T16:29:20,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-12T16:29:20,444 INFO [Thread-2023 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 142 completed 2024-12-12T16:29:20,445 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T16:29:20,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=144, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees 2024-12-12T16:29:20,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-12T16:29:20,448 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=144, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T16:29:20,449 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=144, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T16:29:20,449 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=145, ppid=144, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T16:29:20,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742351_1527 (size=13119) 2024-12-12T16:29:20,460 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/8c2b919144af4e139511bbc6b95824df as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/8c2b919144af4e139511bbc6b95824df 2024-12-12T16:29:20,465 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) 
file(s) in 7f9a08e11e132c4f4473bdc5fef699f6/C of 7f9a08e11e132c4f4473bdc5fef699f6 into 8c2b919144af4e139511bbc6b95824df(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:29:20,465 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7f9a08e11e132c4f4473bdc5fef699f6: 2024-12-12T16:29:20,465 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6., storeName=7f9a08e11e132c4f4473bdc5fef699f6/C, priority=12, startTime=1734020960379; duration=0sec 2024-12-12T16:29:20,465 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:20,465 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7f9a08e11e132c4f4473bdc5fef699f6:C 2024-12-12T16:29:20,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-12T16:29:20,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 7f9a08e11e132c4f4473bdc5fef699f6 2024-12-12T16:29:20,562 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7f9a08e11e132c4f4473bdc5fef699f6 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-12T16:29:20,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=A 2024-12-12T16:29:20,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:20,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=B 2024-12-12T16:29:20,564 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:20,564 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=C 2024-12-12T16:29:20,564 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:20,568 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/c8beb52c60524ca59277d298d445d1a9 is 50, key is test_row_0/A:col10/1734020960561/Put/seqid=0 2024-12-12T16:29:20,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742352_1528 (size=14737) 2024-12-12T16:29:20,601 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:20,602 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-12-12T16:29:20,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, 
pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:20,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. as already flushing 2024-12-12T16:29:20,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:20,602 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:20,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
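The records above show the server-side compaction machinery at work: SortedCompactionPolicy/ExploringCompactionPolicy selects 4 eligible files for store C of region 7f9a08e11e132c4f4473bdc5fef699f6, and PressureAwareThroughputController caps the rewrite at 50 MB/second. In this test the region server schedules those compactions itself, but for reference here is a minimal client-side sketch (table and family names borrowed from the log; everything else, including the class name, is assumed) of asking for a compaction through the public Admin API:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class CompactionRequestSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    byte[] family = Bytes.toBytes("C");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Ask the region servers to consider a minor compaction of this store.
      // The server-side policy (the ExploringCompactionPolicy lines above)
      // still decides which files, if any, actually get compacted.
      admin.compact(table, family);
      // A major compaction would rewrite every store file instead:
      // admin.majorCompact(table, family);
    }
  }
}
```

Whether such a request produces a rewrite is still up to the server-side selection, which is exactly what the "Exploring compaction algorithm has selected ..." lines above record.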
2024-12-12T16:29:20,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:20,613 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:20,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021020607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:20,615 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:20,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42016 deadline: 1734021020612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:20,717 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:20,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021020714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:20,717 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:20,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42016 deadline: 1734021020716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:20,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-12T16:29:20,753 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:20,753 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-12-12T16:29:20,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:20,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. as already flushing 2024-12-12T16:29:20,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:20,754 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
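The RegionTooBusyException warnings above are the write path hitting the region's memstore blocking limit (reported here as 512.0 K, which suggests the test deliberately runs with a very small flush size). In stock HBase that blocking limit is derived from hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. A hedged configuration sketch with illustrative production-style values, not the values used by this test:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Writes to a region are blocked (RegionTooBusyException, as in the log)
    // once its memstore exceeds flush.size * block.multiplier.
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024); // 128 MB
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);             // block at ~512 MB
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("memstore blocking limit = " + blockingLimit + " bytes");
  }
}
```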
2024-12-12T16:29:20,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:20,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:20,812 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/c94ebf30b9254adf8886fedee04d6716 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/c94ebf30b9254adf8886fedee04d6716 2024-12-12T16:29:20,816 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7f9a08e11e132c4f4473bdc5fef699f6/B of 7f9a08e11e132c4f4473bdc5fef699f6 into c94ebf30b9254adf8886fedee04d6716(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:29:20,816 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7f9a08e11e132c4f4473bdc5fef699f6: 2024-12-12T16:29:20,816 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6., storeName=7f9a08e11e132c4f4473bdc5fef699f6/B, priority=12, startTime=1734020960379; duration=0sec 2024-12-12T16:29:20,816 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:20,816 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7f9a08e11e132c4f4473bdc5fef699f6:B 2024-12-12T16:29:20,906 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:20,907 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-12-12T16:29:20,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:20,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. as already flushing 2024-12-12T16:29:20,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 
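Procedures 144/145 above are the master-driven flush requested by the test client ("Client=jenkins ... flush TestAcidGuarantees" followed by FlushTableProcedure and its FlushRegionProcedure subprocedure). The region server keeps answering "NOT flushing ... as already flushing", so the remote FlushRegionCallable fails and is re-dispatched until the in-flight flush completes. A minimal sketch of issuing the same kind of table flush from application code, assuming only the table name seen in the log:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Admin.flush asks the master to flush every region of the table and
      // waits for the resulting procedure, matching the
      // "Operation: FLUSH ... procId ... completed" lines in the log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```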
2024-12-12T16:29:20,907 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:20,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:20,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:20,921 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:20,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42016 deadline: 1734021020918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:20,921 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:20,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021020919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:20,982 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=367 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/c8beb52c60524ca59277d298d445d1a9 2024-12-12T16:29:20,989 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/9adc64818b73484d9e8cf33ee5ea9d16 is 50, key is test_row_0/B:col10/1734020960561/Put/seqid=0 2024-12-12T16:29:20,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742353_1529 (size=9857) 2024-12-12T16:29:21,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-12T16:29:21,059 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:21,060 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-12-12T16:29:21,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:21,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. as already flushing 2024-12-12T16:29:21,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 
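On the client side the busy-region rejections above normally never surface in application code: the HBase client retries internally until the per-call deadline shown in each CallRunner line. Purely for illustration, here is an explicit backoff loop around Table.put; the row, family, and qualifier are copied from the log, while the value and the retry policy are invented for the sketch:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);
          return; // write accepted
        } catch (RegionTooBusyException | RetriesExhaustedWithDetailsException e) {
          // Memstore over its blocking limit; give the flush time to catch up.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
      throw new IOException("region still too busy after retries");
    }
  }
}
```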
2024-12-12T16:29:21,060 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:21,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:21,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:21,212 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:21,213 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-12-12T16:29:21,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:21,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. as already flushing 2024-12-12T16:29:21,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:21,213 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:21,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:21,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:21,228 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:21,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42016 deadline: 1734021021223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:21,228 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:21,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021021224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:21,365 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:21,366 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-12-12T16:29:21,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 
2024-12-12T16:29:21,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. as already flushing 2024-12-12T16:29:21,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:21,366 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:21,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:29:21,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
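The flush records in this section (CompactingMemStore "FLUSHING TO DISK" and CompactionPipeline "Swapping pipeline suffix") indicate that the test's column families A, B, and C run with an in-memory compacting memstore. The exact policy used by the test is not visible in the log; the sketch below assumes BASIC for illustration and shows how such a family could be declared on a new, hypothetical table via the descriptor builders:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class InMemoryCompactionSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Hypothetical table name; families mirror the A/B/C stores in the log.
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuaranteesLike"));
      for (String family : new String[] {"A", "B", "C"}) {
        table.setColumnFamily(
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                // With an in-memory compaction policy the memstore keeps a
                // pipeline of segments, which is what the CompactingMemStore
                // and CompactionPipeline lines in the log reflect.
                .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
                .build());
      }
      admin.createTable(table.build());
    }
  }
}
```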
2024-12-12T16:29:21,394 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=367 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/9adc64818b73484d9e8cf33ee5ea9d16 2024-12-12T16:29:21,402 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/31202a401aad413ba57f2e5432241857 is 50, key is test_row_0/C:col10/1734020960561/Put/seqid=0 2024-12-12T16:29:21,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742354_1530 (size=9857) 2024-12-12T16:29:21,406 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=367 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/31202a401aad413ba57f2e5432241857 2024-12-12T16:29:21,409 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/c8beb52c60524ca59277d298d445d1a9 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/c8beb52c60524ca59277d298d445d1a9 2024-12-12T16:29:21,413 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/c8beb52c60524ca59277d298d445d1a9, entries=200, sequenceid=367, filesize=14.4 K 2024-12-12T16:29:21,413 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/9adc64818b73484d9e8cf33ee5ea9d16 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/9adc64818b73484d9e8cf33ee5ea9d16 2024-12-12T16:29:21,417 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/9adc64818b73484d9e8cf33ee5ea9d16, entries=100, sequenceid=367, filesize=9.6 K 2024-12-12T16:29:21,418 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/31202a401aad413ba57f2e5432241857 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/31202a401aad413ba57f2e5432241857 2024-12-12T16:29:21,422 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/31202a401aad413ba57f2e5432241857, entries=100, sequenceid=367, filesize=9.6 K 2024-12-12T16:29:21,423 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 7f9a08e11e132c4f4473bdc5fef699f6 in 861ms, sequenceid=367, compaction requested=false 2024-12-12T16:29:21,423 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7f9a08e11e132c4f4473bdc5fef699f6: 2024-12-12T16:29:21,518 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:21,519 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-12-12T16:29:21,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:21,519 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2837): Flushing 7f9a08e11e132c4f4473bdc5fef699f6 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-12T16:29:21,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=A 2024-12-12T16:29:21,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:21,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=B 2024-12-12T16:29:21,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:21,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=C 2024-12-12T16:29:21,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:21,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/0e3f5228b4e2477797aec56da4e792fd is 50, key is test_row_0/A:col10/1734020960596/Put/seqid=0 2024-12-12T16:29:21,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742355_1531 (size=12301) 2024-12-12T16:29:21,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=144 2024-12-12T16:29:21,734 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. as already flushing 2024-12-12T16:29:21,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 7f9a08e11e132c4f4473bdc5fef699f6 2024-12-12T16:29:21,761 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:21,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021021757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:21,761 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:21,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42016 deadline: 1734021021757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:21,865 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:21,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021021862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:21,866 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:21,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42016 deadline: 1734021021862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:21,931 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=390 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/0e3f5228b4e2477797aec56da4e792fd 2024-12-12T16:29:21,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/565b5157c29e482e81f0c20ae1266c8d is 50, key is test_row_0/B:col10/1734020960596/Put/seqid=0 2024-12-12T16:29:21,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742356_1532 (size=12301) 2024-12-12T16:29:22,070 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:22,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021022066, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:22,070 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:22,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42016 deadline: 1734021022068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:22,179 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:22,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41942 deadline: 1734021022178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:22,180 DEBUG [Thread-2017 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8170 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6., hostname=4f6a4780a2f6,41933,1734020809476, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T16:29:22,188 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:22,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41966 deadline: 1734021022185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:22,189 DEBUG [Thread-2019 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8178 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6., hostname=4f6a4780a2f6,41933,1734020809476, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T16:29:22,197 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:22,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41956 deadline: 1734021022194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:22,198 DEBUG [Thread-2021 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8194 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6., hostname=4f6a4780a2f6,41933,1734020809476, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T16:29:22,344 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=390 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/565b5157c29e482e81f0c20ae1266c8d 2024-12-12T16:29:22,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/63c54c69a836495382cb27a14aad4ac7 is 50, key is test_row_0/C:col10/1734020960596/Put/seqid=0 2024-12-12T16:29:22,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742357_1533 (size=12301) 2024-12-12T16:29:22,377 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:22,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021022374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:22,377 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:22,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42016 deadline: 1734021022374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:22,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-12T16:29:22,755 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=390 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/63c54c69a836495382cb27a14aad4ac7 2024-12-12T16:29:22,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/0e3f5228b4e2477797aec56da4e792fd as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/0e3f5228b4e2477797aec56da4e792fd 2024-12-12T16:29:22,764 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/0e3f5228b4e2477797aec56da4e792fd, entries=150, sequenceid=390, filesize=12.0 K 2024-12-12T16:29:22,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/565b5157c29e482e81f0c20ae1266c8d as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/565b5157c29e482e81f0c20ae1266c8d 2024-12-12T16:29:22,768 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/565b5157c29e482e81f0c20ae1266c8d, entries=150, sequenceid=390, filesize=12.0 K 2024-12-12T16:29:22,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/63c54c69a836495382cb27a14aad4ac7 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/63c54c69a836495382cb27a14aad4ac7 2024-12-12T16:29:22,772 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/63c54c69a836495382cb27a14aad4ac7, entries=150, sequenceid=390, filesize=12.0 K 2024-12-12T16:29:22,773 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 7f9a08e11e132c4f4473bdc5fef699f6 in 1254ms, sequenceid=390, compaction requested=true 2024-12-12T16:29:22,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2538): Flush status journal for 7f9a08e11e132c4f4473bdc5fef699f6: 2024-12-12T16:29:22,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 
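The repeated RegionTooBusyException entries above record the region server rejecting writes while the region's memstore is over its blocking limit (512.0 K here, which suggests the test runs with a much smaller flush size than the production default); the client is expected to back off and retry, and the HBase client already retries this internally. A minimal application-level sketch of the same back-off pattern, assuming the standard HBase Java client, with an illustrative class name, row, value, and retry budget:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionPutSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                int attempt = 0;
                while (true) {
                    try {
                        table.put(put);   // rejected while the region's memstore is over its blocking limit
                        break;
                    } catch (RegionTooBusyException busy) {
                        // Server-side pushback; depending on client retry settings it may instead
                        // arrive wrapped in a retries-exhausted exception. Back off and try again.
                        if (++attempt > 5) {
                            throw busy;   // illustrative retry budget
                        }
                        Thread.sleep(100L * attempt);
                    }
                }
            }
        }
    }

The blocking limit itself is derived on the server from the configured flush size and block multiplier (hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier), so raising either raises the point at which writers are pushed back; the values this test uses are not shown in this section of the log.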
2024-12-12T16:29:22,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=145 2024-12-12T16:29:22,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4106): Remote procedure done, pid=145 2024-12-12T16:29:22,776 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=145, resume processing ppid=144 2024-12-12T16:29:22,776 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, ppid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3250 sec 2024-12-12T16:29:22,777 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees in 2.3310 sec 2024-12-12T16:29:22,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 7f9a08e11e132c4f4473bdc5fef699f6 2024-12-12T16:29:22,885 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7f9a08e11e132c4f4473bdc5fef699f6 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-12T16:29:22,885 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=A 2024-12-12T16:29:22,885 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:22,885 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=B 2024-12-12T16:29:22,885 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:22,885 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=C 2024-12-12T16:29:22,885 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:22,889 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/e2990383524244b0b17eb765050655e8 is 50, key is test_row_0/A:col10/1734020961755/Put/seqid=0 2024-12-12T16:29:22,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742358_1534 (size=14741) 2024-12-12T16:29:22,947 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:22,947 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:22,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021022941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:22,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42016 deadline: 1734021022942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:23,049 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:23,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42016 deadline: 1734021023048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:23,049 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:23,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021023048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:23,253 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:23,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021023250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:23,253 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:23,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42016 deadline: 1734021023251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:23,294 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=405 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/e2990383524244b0b17eb765050655e8 2024-12-12T16:29:23,301 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/bdaa78cbde55435ab588cdb13bb2b752 is 50, key is test_row_0/B:col10/1734020961755/Put/seqid=0 2024-12-12T16:29:23,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742359_1535 (size=12301) 2024-12-12T16:29:23,555 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:23,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021023554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:23,560 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:23,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42016 deadline: 1734021023556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:23,708 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=405 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/bdaa78cbde55435ab588cdb13bb2b752 2024-12-12T16:29:23,718 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/6a10303637eb4593b69fcf3a992056da is 50, key is test_row_0/C:col10/1734020961755/Put/seqid=0 2024-12-12T16:29:23,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742360_1536 (size=12301) 2024-12-12T16:29:24,060 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:24,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41980 deadline: 1734021024056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:24,065 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:24,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42016 deadline: 1734021024061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:24,124 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=405 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/6a10303637eb4593b69fcf3a992056da 2024-12-12T16:29:24,128 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/e2990383524244b0b17eb765050655e8 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/e2990383524244b0b17eb765050655e8 2024-12-12T16:29:24,131 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/e2990383524244b0b17eb765050655e8, entries=200, sequenceid=405, filesize=14.4 K 2024-12-12T16:29:24,131 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/bdaa78cbde55435ab588cdb13bb2b752 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/bdaa78cbde55435ab588cdb13bb2b752 2024-12-12T16:29:24,135 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/bdaa78cbde55435ab588cdb13bb2b752, entries=150, sequenceid=405, filesize=12.0 K 2024-12-12T16:29:24,135 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/6a10303637eb4593b69fcf3a992056da as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/6a10303637eb4593b69fcf3a992056da 2024-12-12T16:29:24,138 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/6a10303637eb4593b69fcf3a992056da, entries=150, sequenceid=405, filesize=12.0 K 2024-12-12T16:29:24,139 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 7f9a08e11e132c4f4473bdc5fef699f6 in 1255ms, sequenceid=405, compaction requested=true 2024-12-12T16:29:24,139 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7f9a08e11e132c4f4473bdc5fef699f6: 2024-12-12T16:29:24,139 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7f9a08e11e132c4f4473bdc5fef699f6:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T16:29:24,139 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:24,139 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T16:29:24,139 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T16:29:24,139 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7f9a08e11e132c4f4473bdc5fef699f6:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T16:29:24,139 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:24,139 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7f9a08e11e132c4f4473bdc5fef699f6:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T16:29:24,139 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:29:24,141 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47612 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T16:29:24,141 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): 7f9a08e11e132c4f4473bdc5fef699f6/B is initiating minor compaction (all files) 2024-12-12T16:29:24,141 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7f9a08e11e132c4f4473bdc5fef699f6/B in TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 
2024-12-12T16:29:24,141 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/c94ebf30b9254adf8886fedee04d6716, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/9adc64818b73484d9e8cf33ee5ea9d16, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/565b5157c29e482e81f0c20ae1266c8d, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/bdaa78cbde55435ab588cdb13bb2b752] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp, totalSize=46.5 K 2024-12-12T16:29:24,141 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 54932 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T16:29:24,141 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): 7f9a08e11e132c4f4473bdc5fef699f6/A is initiating minor compaction (all files) 2024-12-12T16:29:24,141 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7f9a08e11e132c4f4473bdc5fef699f6/A in TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:24,141 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/f59f5d6eb82646f0ac84b6b11fa3f60c, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/c8beb52c60524ca59277d298d445d1a9, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/0e3f5228b4e2477797aec56da4e792fd, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/e2990383524244b0b17eb765050655e8] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp, totalSize=53.6 K 2024-12-12T16:29:24,142 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting c94ebf30b9254adf8886fedee04d6716, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=351, earliestPutTs=1734020959583 2024-12-12T16:29:24,142 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting f59f5d6eb82646f0ac84b6b11fa3f60c, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=351, earliestPutTs=1734020959583 2024-12-12T16:29:24,142 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 9adc64818b73484d9e8cf33ee5ea9d16, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=367, 
earliestPutTs=1734020960561 2024-12-12T16:29:24,142 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting c8beb52c60524ca59277d298d445d1a9, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=367, earliestPutTs=1734020959929 2024-12-12T16:29:24,142 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 565b5157c29e482e81f0c20ae1266c8d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=390, earliestPutTs=1734020960592 2024-12-12T16:29:24,142 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0e3f5228b4e2477797aec56da4e792fd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=390, earliestPutTs=1734020960592 2024-12-12T16:29:24,142 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting bdaa78cbde55435ab588cdb13bb2b752, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=405, earliestPutTs=1734020961755 2024-12-12T16:29:24,143 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting e2990383524244b0b17eb765050655e8, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=405, earliestPutTs=1734020961746 2024-12-12T16:29:24,151 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7f9a08e11e132c4f4473bdc5fef699f6#B#compaction#461 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:29:24,151 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/d2681672a64e4644b59b1debe00d173c is 50, key is test_row_0/B:col10/1734020961755/Put/seqid=0 2024-12-12T16:29:24,154 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7f9a08e11e132c4f4473bdc5fef699f6#A#compaction#462 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:29:24,155 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/9e7aebbb8693425f8e4e05dd9fb2efab is 50, key is test_row_0/A:col10/1734020961755/Put/seqid=0 2024-12-12T16:29:24,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742361_1537 (size=13289) 2024-12-12T16:29:24,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742362_1538 (size=13289) 2024-12-12T16:29:24,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-12T16:29:24,552 INFO [Thread-2023 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 144 completed 2024-12-12T16:29:24,554 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T16:29:24,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=146, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees 2024-12-12T16:29:24,555 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=146, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T16:29:24,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-12T16:29:24,556 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=146, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T16:29:24,556 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=147, ppid=146, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T16:29:24,570 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/9e7aebbb8693425f8e4e05dd9fb2efab as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/9e7aebbb8693425f8e4e05dd9fb2efab 2024-12-12T16:29:24,573 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/d2681672a64e4644b59b1debe00d173c as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/d2681672a64e4644b59b1debe00d173c 2024-12-12T16:29:24,574 INFO 
[RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7f9a08e11e132c4f4473bdc5fef699f6/A of 7f9a08e11e132c4f4473bdc5fef699f6 into 9e7aebbb8693425f8e4e05dd9fb2efab(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:29:24,574 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7f9a08e11e132c4f4473bdc5fef699f6: 2024-12-12T16:29:24,574 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6., storeName=7f9a08e11e132c4f4473bdc5fef699f6/A, priority=12, startTime=1734020964139; duration=0sec 2024-12-12T16:29:24,574 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:29:24,574 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7f9a08e11e132c4f4473bdc5fef699f6:A 2024-12-12T16:29:24,574 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T16:29:24,575 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47578 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T16:29:24,576 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): 7f9a08e11e132c4f4473bdc5fef699f6/C is initiating minor compaction (all files) 2024-12-12T16:29:24,576 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7f9a08e11e132c4f4473bdc5fef699f6/C in TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 
2024-12-12T16:29:24,576 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/8c2b919144af4e139511bbc6b95824df, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/31202a401aad413ba57f2e5432241857, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/63c54c69a836495382cb27a14aad4ac7, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/6a10303637eb4593b69fcf3a992056da] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp, totalSize=46.5 K 2024-12-12T16:29:24,576 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8c2b919144af4e139511bbc6b95824df, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=351, earliestPutTs=1734020959583 2024-12-12T16:29:24,577 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 31202a401aad413ba57f2e5432241857, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=367, earliestPutTs=1734020960561 2024-12-12T16:29:24,577 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 63c54c69a836495382cb27a14aad4ac7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=390, earliestPutTs=1734020960592 2024-12-12T16:29:24,577 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6a10303637eb4593b69fcf3a992056da, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=405, earliestPutTs=1734020961755 2024-12-12T16:29:24,578 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7f9a08e11e132c4f4473bdc5fef699f6/B of 7f9a08e11e132c4f4473bdc5fef699f6 into d2681672a64e4644b59b1debe00d173c(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T16:29:24,578 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7f9a08e11e132c4f4473bdc5fef699f6: 2024-12-12T16:29:24,578 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6., storeName=7f9a08e11e132c4f4473bdc5fef699f6/B, priority=12, startTime=1734020964139; duration=0sec 2024-12-12T16:29:24,578 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:24,578 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7f9a08e11e132c4f4473bdc5fef699f6:B 2024-12-12T16:29:24,585 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7f9a08e11e132c4f4473bdc5fef699f6#C#compaction#463 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:29:24,585 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/d929e30fab214abfb97c09bee6529993 is 50, key is test_row_0/C:col10/1734020961755/Put/seqid=0 2024-12-12T16:29:24,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742363_1539 (size=13255) 2024-12-12T16:29:24,594 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/d929e30fab214abfb97c09bee6529993 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/d929e30fab214abfb97c09bee6529993 2024-12-12T16:29:24,597 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7f9a08e11e132c4f4473bdc5fef699f6/C of 7f9a08e11e132c4f4473bdc5fef699f6 into d929e30fab214abfb97c09bee6529993(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
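The compaction entries above show each of the three stores (A, B, C) selecting all four of its files for a minor compaction through the exploring policy, with writes throttled by the pressure-aware throughput controller (total limit 50.00 MB/second in this run). These behaviours are governed by region-server configuration keys; a brief sketch of setting the most relevant ones programmatically, with illustrative values rather than the ones this test uses (in a real deployment they would normally live in hbase-site.xml):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Minimum/maximum number of store files considered for one minor compaction.
            conf.setInt("hbase.hstore.compaction.min", 4);
            conf.setInt("hbase.hstore.compaction.max", 10);
            // Size ratio the exploring policy uses when scoring candidate file sets.
            conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
            // Bounds (bytes/second) for the pressure-aware compaction throughput controller.
            conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
            conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
            System.out.println("compaction ratio = " + conf.get("hbase.hstore.compaction.ratio"));
        }
    }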
2024-12-12T16:29:24,597 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7f9a08e11e132c4f4473bdc5fef699f6: 2024-12-12T16:29:24,597 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6., storeName=7f9a08e11e132c4f4473bdc5fef699f6/C, priority=12, startTime=1734020964139; duration=0sec 2024-12-12T16:29:24,597 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:24,597 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7f9a08e11e132c4f4473bdc5fef699f6:C 2024-12-12T16:29:24,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-12T16:29:24,708 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:24,708 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-12-12T16:29:24,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:24,708 DEBUG [Thread-2032 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0c692575 to 127.0.0.1:52684 2024-12-12T16:29:24,708 DEBUG [Thread-2032 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:29:24,708 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2837): Flushing 7f9a08e11e132c4f4473bdc5fef699f6 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-12T16:29:24,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=A 2024-12-12T16:29:24,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:24,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=B 2024-12-12T16:29:24,709 DEBUG [Thread-2026 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0f2423f3 to 127.0.0.1:52684 2024-12-12T16:29:24,709 DEBUG [Thread-2026 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:29:24,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:24,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=C 2024-12-12T16:29:24,709 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:24,711 DEBUG [Thread-2028 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x184771cf to 127.0.0.1:52684 2024-12-12T16:29:24,711 DEBUG [Thread-2028 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:29:24,712 DEBUG [Thread-2024 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x41b0e7b6 to 127.0.0.1:52684 2024-12-12T16:29:24,712 DEBUG [Thread-2024 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:29:24,712 DEBUG [Thread-2030 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x076f0408 to 127.0.0.1:52684 2024-12-12T16:29:24,712 DEBUG [Thread-2030 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:29:24,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/a9e31f4f62fa4402873c8dc728ce4503 is 50, key is test_row_0/A:col10/1734020962941/Put/seqid=0 2024-12-12T16:29:24,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742364_1540 (size=12301) 2024-12-12T16:29:24,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-12T16:29:25,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 7f9a08e11e132c4f4473bdc5fef699f6 2024-12-12T16:29:25,069 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 
as already flushing 2024-12-12T16:29:25,070 DEBUG [Thread-2015 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7043f683 to 127.0.0.1:52684 2024-12-12T16:29:25,070 DEBUG [Thread-2015 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:29:25,072 DEBUG [Thread-2013 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x61d38088 to 127.0.0.1:52684 2024-12-12T16:29:25,072 DEBUG [Thread-2013 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:29:25,117 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=429 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/a9e31f4f62fa4402873c8dc728ce4503 2024-12-12T16:29:25,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/c125ca4b74314655ab6aaf29f0b95546 is 50, key is test_row_0/B:col10/1734020962941/Put/seqid=0 2024-12-12T16:29:25,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742365_1541 (size=12301) 2024-12-12T16:29:25,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-12T16:29:25,528 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=429 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/c125ca4b74314655ab6aaf29f0b95546 2024-12-12T16:29:25,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/65f6cef135724cb89ddbff9c3cbc5bbe is 50, key is test_row_0/C:col10/1734020962941/Put/seqid=0 2024-12-12T16:29:25,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742366_1542 (size=12301) 2024-12-12T16:29:25,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-12T16:29:25,937 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=429 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/65f6cef135724cb89ddbff9c3cbc5bbe 2024-12-12T16:29:25,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/a9e31f4f62fa4402873c8dc728ce4503 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/a9e31f4f62fa4402873c8dc728ce4503 2024-12-12T16:29:25,943 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/a9e31f4f62fa4402873c8dc728ce4503, entries=150, sequenceid=429, filesize=12.0 K 2024-12-12T16:29:25,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/c125ca4b74314655ab6aaf29f0b95546 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/c125ca4b74314655ab6aaf29f0b95546 2024-12-12T16:29:25,947 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/c125ca4b74314655ab6aaf29f0b95546, entries=150, sequenceid=429, filesize=12.0 K 2024-12-12T16:29:25,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/65f6cef135724cb89ddbff9c3cbc5bbe as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/65f6cef135724cb89ddbff9c3cbc5bbe 2024-12-12T16:29:25,950 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/65f6cef135724cb89ddbff9c3cbc5bbe, entries=150, sequenceid=429, filesize=12.0 K 2024-12-12T16:29:25,950 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=13.42 KB/13740 for 7f9a08e11e132c4f4473bdc5fef699f6 in 1242ms, sequenceid=429, compaction requested=false 2024-12-12T16:29:25,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2538): Flush status journal for 7f9a08e11e132c4f4473bdc5fef699f6: 2024-12-12T16:29:25,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 
2024-12-12T16:29:25,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=147 2024-12-12T16:29:25,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4106): Remote procedure done, pid=147 2024-12-12T16:29:25,953 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=147, resume processing ppid=146 2024-12-12T16:29:25,953 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, ppid=146, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3960 sec 2024-12-12T16:29:25,954 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees in 1.3990 sec 2024-12-12T16:29:26,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-12T16:29:26,659 INFO [Thread-2023 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 146 completed 2024-12-12T16:29:32,204 DEBUG [Thread-2019 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x34b30c39 to 127.0.0.1:52684 2024-12-12T16:29:32,205 DEBUG [Thread-2019 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:29:32,262 DEBUG [Thread-2017 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2b0c2472 to 127.0.0.1:52684 2024-12-12T16:29:32,262 DEBUG [Thread-2017 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:29:32,267 DEBUG [Thread-2021 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3d672ed2 to 127.0.0.1:52684 2024-12-12T16:29:32,267 DEBUG [Thread-2021 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:29:32,267 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers:
2024-12-12T16:29:32,267 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 89
2024-12-12T16:29:32,267 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 113
2024-12-12T16:29:32,267 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 53
2024-12-12T16:29:32,267 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 45
2024-12-12T16:29:32,267 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 41
2024-12-12T16:29:32,267 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers:
2024-12-12T16:29:32,267 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners:
2024-12-12T16:29:32,267 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2560
2024-12-12T16:29:32,267 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7680 rows
2024-12-12T16:29:32,267 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2564
2024-12-12T16:29:32,267 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7692 rows
2024-12-12T16:29:32,267 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2565
2024-12-12T16:29:32,267 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7695 rows
2024-12-12T16:29:32,267 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2564
2024-12-12T16:29:32,267 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7692 rows
2024-12-12T16:29:32,267 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2587
2024-12-12T16:29:32,267 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7759 rows
2024-12-12T16:29:32,267 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-12-12T16:29:32,267 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7177efc9 to 127.0.0.1:52684
2024-12-12T16:29:32,267 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-12T16:29:32,271 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees
2024-12-12T16:29:32,272 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees
2024-12-12T16:29:32,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=148, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees
2024-12-12T16:29:32,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148
2024-12-12T16:29:32,275 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734020972275"}]},"ts":"1734020972275"}
2024-12-12T16:29:32,276 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta
2024-12-12T16:29:32,278 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING
2024-12-12T16:29:32,278 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=149, ppid=148, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}]
2024-12-12T16:29:32,279 INFO [PEWorker-4 {}]
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=150, ppid=149, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7f9a08e11e132c4f4473bdc5fef699f6, UNASSIGN}] 2024-12-12T16:29:32,280 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=150, ppid=149, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7f9a08e11e132c4f4473bdc5fef699f6, UNASSIGN 2024-12-12T16:29:32,281 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=150 updating hbase:meta row=7f9a08e11e132c4f4473bdc5fef699f6, regionState=CLOSING, regionLocation=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:32,281 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T16:29:32,281 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=151, ppid=150, state=RUNNABLE; CloseRegionProcedure 7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476}] 2024-12-12T16:29:32,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-12T16:29:32,432 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:32,433 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(124): Close 7f9a08e11e132c4f4473bdc5fef699f6 2024-12-12T16:29:32,433 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T16:29:32,433 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1681): Closing 7f9a08e11e132c4f4473bdc5fef699f6, disabling compactions & flushes 2024-12-12T16:29:32,433 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:32,433 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:32,433 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. after waiting 0 ms 2024-12-12T16:29:32,433 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 
2024-12-12T16:29:32,433 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(2837): Flushing 7f9a08e11e132c4f4473bdc5fef699f6 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-12T16:29:32,433 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=A 2024-12-12T16:29:32,434 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:32,434 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=B 2024-12-12T16:29:32,434 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:32,434 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7f9a08e11e132c4f4473bdc5fef699f6, store=C 2024-12-12T16:29:32,434 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:32,437 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/773c50b9e8a048f98384667eef8f5192 is 50, key is test_row_0/A:col10/1734020972266/Put/seqid=0 2024-12-12T16:29:32,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742367_1543 (size=12301) 2024-12-12T16:29:32,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-12T16:29:32,841 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=437 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/773c50b9e8a048f98384667eef8f5192 2024-12-12T16:29:32,847 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/a057b6e766f74b4f85af3c4caaee2837 is 50, key is test_row_0/B:col10/1734020972266/Put/seqid=0 2024-12-12T16:29:32,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742368_1544 (size=12301) 2024-12-12T16:29:32,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-12T16:29:33,252 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 
{event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=437 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/a057b6e766f74b4f85af3c4caaee2837 2024-12-12T16:29:33,257 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/13cb2f5d7fd4471c93ea55afc69be894 is 50, key is test_row_0/C:col10/1734020972266/Put/seqid=0 2024-12-12T16:29:33,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742369_1545 (size=12301) 2024-12-12T16:29:33,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-12T16:29:33,660 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=437 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/13cb2f5d7fd4471c93ea55afc69be894 2024-12-12T16:29:33,663 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/A/773c50b9e8a048f98384667eef8f5192 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/773c50b9e8a048f98384667eef8f5192 2024-12-12T16:29:33,666 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/773c50b9e8a048f98384667eef8f5192, entries=150, sequenceid=437, filesize=12.0 K 2024-12-12T16:29:33,666 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/B/a057b6e766f74b4f85af3c4caaee2837 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/a057b6e766f74b4f85af3c4caaee2837 2024-12-12T16:29:33,668 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/a057b6e766f74b4f85af3c4caaee2837, entries=150, sequenceid=437, filesize=12.0 K 2024-12-12T16:29:33,669 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/.tmp/C/13cb2f5d7fd4471c93ea55afc69be894 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/13cb2f5d7fd4471c93ea55afc69be894 2024-12-12T16:29:33,671 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/13cb2f5d7fd4471c93ea55afc69be894, entries=150, sequenceid=437, filesize=12.0 K 2024-12-12T16:29:33,672 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 7f9a08e11e132c4f4473bdc5fef699f6 in 1239ms, sequenceid=437, compaction requested=true 2024-12-12T16:29:33,672 DEBUG [StoreCloser-TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/ab06a529d7184b7fb272395deced6312, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/502b25779e9941bf8fd4250be71274d2, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/3c772500aea64e599ba935e595efb48e, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/8b9caeab00dd499b95da3fcd18b2d4dc, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/72a7ae4080f74d0c8e9da45836ceba36, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/fc33bc7e9a1846cf9fc33fe23904591e, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/194ebda3369f48f690b5cbc14111f812, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/e7361bd2a95448618843d871345d2518, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/6c3218b0d2fc496fb44b5e79f60a55c7, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/9c6c013b733a46be8d040191827ad816, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/dc8faf73f74847c691c6f687475d4d0a, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/32ce91b4a58d46199e8c2a1da1f95c8d, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/dd4a29ab7a6e4688b2420764206a59b0, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/7630274673bc42dba091ba25a08cd711, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/ea2f3099aedd4a2ca67429b66feec58c, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/bef2065021204e0e9a1ba762d7d1b180, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/0de5a350decc4b2e8b0ad1c1d8732c95, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/df8d4e635d16444bb658294d49a4cf83, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/a5fa2b12d0ce42faa95fd123ea09b8f0, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/f430940484c54fbcbb63db574ef0f705, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/6b7e90602482454cbd3ef829869d0e4c, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/f25d3215b8a84be4907a043a6516f3f8, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/0dca8cac80334dd38b0fe00c19ebd900, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/bc55f306090c4a929504ae206f962e32, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/6a53e54197de4ba7a008327c99d11508, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/f59f5d6eb82646f0ac84b6b11fa3f60c, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/c8beb52c60524ca59277d298d445d1a9, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/0e3f5228b4e2477797aec56da4e792fd, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/e2990383524244b0b17eb765050655e8] to archive 2024-12-12T16:29:33,673 DEBUG [StoreCloser-TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-12T16:29:33,675 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/ab06a529d7184b7fb272395deced6312 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/ab06a529d7184b7fb272395deced6312 2024-12-12T16:29:33,675 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/502b25779e9941bf8fd4250be71274d2 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/502b25779e9941bf8fd4250be71274d2 2024-12-12T16:29:33,675 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/8b9caeab00dd499b95da3fcd18b2d4dc to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/8b9caeab00dd499b95da3fcd18b2d4dc 2024-12-12T16:29:33,675 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/3c772500aea64e599ba935e595efb48e to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/3c772500aea64e599ba935e595efb48e 2024-12-12T16:29:33,675 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/72a7ae4080f74d0c8e9da45836ceba36 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/72a7ae4080f74d0c8e9da45836ceba36 2024-12-12T16:29:33,679 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/fc33bc7e9a1846cf9fc33fe23904591e to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/fc33bc7e9a1846cf9fc33fe23904591e 2024-12-12T16:29:33,679 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/194ebda3369f48f690b5cbc14111f812 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/194ebda3369f48f690b5cbc14111f812 2024-12-12T16:29:33,681 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/6c3218b0d2fc496fb44b5e79f60a55c7 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/6c3218b0d2fc496fb44b5e79f60a55c7 2024-12-12T16:29:33,681 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/dc8faf73f74847c691c6f687475d4d0a to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/dc8faf73f74847c691c6f687475d4d0a 2024-12-12T16:29:33,681 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/ea2f3099aedd4a2ca67429b66feec58c to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/ea2f3099aedd4a2ca67429b66feec58c 2024-12-12T16:29:33,681 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/dd4a29ab7a6e4688b2420764206a59b0 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/dd4a29ab7a6e4688b2420764206a59b0 2024-12-12T16:29:33,681 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/e7361bd2a95448618843d871345d2518 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/e7361bd2a95448618843d871345d2518 2024-12-12T16:29:33,681 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/7630274673bc42dba091ba25a08cd711 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/7630274673bc42dba091ba25a08cd711 2024-12-12T16:29:33,681 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/9c6c013b733a46be8d040191827ad816 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/9c6c013b733a46be8d040191827ad816 2024-12-12T16:29:33,681 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/32ce91b4a58d46199e8c2a1da1f95c8d to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/32ce91b4a58d46199e8c2a1da1f95c8d 2024-12-12T16:29:33,682 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/bef2065021204e0e9a1ba762d7d1b180 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/bef2065021204e0e9a1ba762d7d1b180 2024-12-12T16:29:33,683 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/6b7e90602482454cbd3ef829869d0e4c to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/6b7e90602482454cbd3ef829869d0e4c 2024-12-12T16:29:33,683 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/a5fa2b12d0ce42faa95fd123ea09b8f0 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/a5fa2b12d0ce42faa95fd123ea09b8f0 2024-12-12T16:29:33,683 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/0dca8cac80334dd38b0fe00c19ebd900 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/0dca8cac80334dd38b0fe00c19ebd900 2024-12-12T16:29:33,683 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/0de5a350decc4b2e8b0ad1c1d8732c95 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/0de5a350decc4b2e8b0ad1c1d8732c95 2024-12-12T16:29:33,683 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/f430940484c54fbcbb63db574ef0f705 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/f430940484c54fbcbb63db574ef0f705 2024-12-12T16:29:33,683 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/df8d4e635d16444bb658294d49a4cf83 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/df8d4e635d16444bb658294d49a4cf83 2024-12-12T16:29:33,683 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/f25d3215b8a84be4907a043a6516f3f8 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/f25d3215b8a84be4907a043a6516f3f8 2024-12-12T16:29:33,684 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/bc55f306090c4a929504ae206f962e32 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/bc55f306090c4a929504ae206f962e32 2024-12-12T16:29:33,684 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/f59f5d6eb82646f0ac84b6b11fa3f60c to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/f59f5d6eb82646f0ac84b6b11fa3f60c 2024-12-12T16:29:33,684 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/c8beb52c60524ca59277d298d445d1a9 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/c8beb52c60524ca59277d298d445d1a9 2024-12-12T16:29:33,684 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/6a53e54197de4ba7a008327c99d11508 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/6a53e54197de4ba7a008327c99d11508 2024-12-12T16:29:33,684 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/e2990383524244b0b17eb765050655e8 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/e2990383524244b0b17eb765050655e8 2024-12-12T16:29:33,684 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/0e3f5228b4e2477797aec56da4e792fd to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/0e3f5228b4e2477797aec56da4e792fd 2024-12-12T16:29:33,685 DEBUG [StoreCloser-TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/3c3238ac142f4a00807830f5fed10cb1, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/20b7db9ddb8c4b118222dd7c9ee922c6, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/2caed9e86cbb40e29e433674b92b52ff, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/5985ec53a08a4ab8a77102c387654268, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/a3ccb499f99d4ff593e8703f106faaf9, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/f9f34da35d82493fb664ff0ab6ea4dae, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/d2ddbc536cc44a16b07bf0ec7657adee, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/d1827814fdcf4f389f5293c06a1b6a10, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/0d9c00e20d244e28a269c32312d05a65, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/d2fcbeddd8b84fb088c55f4e921f2f2e, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/e4beb31865c149559d9d60230d95103f, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/21131d683f534e50b675e55c117d8882, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/afe4fac00ee243cfa355eb2ac234b25e, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/8ecff760ab13415bb877bbeafac3d348, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/4234b091fc9c460598a64a3c9212b8fd, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/7aa29c93eb744e6488d3d79a1942878e, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/65c3711cb5e045a987dc6fdd941f9def, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/02570d285f264873abef038b5003dd19, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/cb558f5b89dd42cea2c45bde5e6c2095, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/88c05bc586b84d3690901211ca8afb15, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/574e65705c014b1d98f7faf354674d02, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/d017e80487c14410aa287adde1be67bd, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/6db266ee134d4ed7936c6fb103c60f24, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/92dda5b8cc50420dac0fe372150390f9, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/c94ebf30b9254adf8886fedee04d6716, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/adbee3a52f504d69bcba55d3d63cb1fe, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/9adc64818b73484d9e8cf33ee5ea9d16, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/565b5157c29e482e81f0c20ae1266c8d, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/bdaa78cbde55435ab588cdb13bb2b752] to archive 2024-12-12T16:29:33,686 DEBUG [StoreCloser-TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-12T16:29:33,688 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/20b7db9ddb8c4b118222dd7c9ee922c6 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/20b7db9ddb8c4b118222dd7c9ee922c6 2024-12-12T16:29:33,688 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/5985ec53a08a4ab8a77102c387654268 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/5985ec53a08a4ab8a77102c387654268 2024-12-12T16:29:33,688 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/2caed9e86cbb40e29e433674b92b52ff to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/2caed9e86cbb40e29e433674b92b52ff 2024-12-12T16:29:33,688 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/d1827814fdcf4f389f5293c06a1b6a10 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/d1827814fdcf4f389f5293c06a1b6a10 2024-12-12T16:29:33,688 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/3c3238ac142f4a00807830f5fed10cb1 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/3c3238ac142f4a00807830f5fed10cb1 2024-12-12T16:29:33,688 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/f9f34da35d82493fb664ff0ab6ea4dae to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/f9f34da35d82493fb664ff0ab6ea4dae 2024-12-12T16:29:33,688 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/a3ccb499f99d4ff593e8703f106faaf9 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/a3ccb499f99d4ff593e8703f106faaf9 2024-12-12T16:29:33,688 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/d2ddbc536cc44a16b07bf0ec7657adee to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/d2ddbc536cc44a16b07bf0ec7657adee 2024-12-12T16:29:33,689 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/0d9c00e20d244e28a269c32312d05a65 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/0d9c00e20d244e28a269c32312d05a65 2024-12-12T16:29:33,689 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/e4beb31865c149559d9d60230d95103f to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/e4beb31865c149559d9d60230d95103f 2024-12-12T16:29:33,689 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/d2fcbeddd8b84fb088c55f4e921f2f2e to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/d2fcbeddd8b84fb088c55f4e921f2f2e 2024-12-12T16:29:33,690 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/21131d683f534e50b675e55c117d8882 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/21131d683f534e50b675e55c117d8882 2024-12-12T16:29:33,690 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/8ecff760ab13415bb877bbeafac3d348 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/8ecff760ab13415bb877bbeafac3d348 2024-12-12T16:29:33,690 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/4234b091fc9c460598a64a3c9212b8fd to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/4234b091fc9c460598a64a3c9212b8fd 2024-12-12T16:29:33,690 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/afe4fac00ee243cfa355eb2ac234b25e to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/afe4fac00ee243cfa355eb2ac234b25e 2024-12-12T16:29:33,690 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/7aa29c93eb744e6488d3d79a1942878e to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/7aa29c93eb744e6488d3d79a1942878e 2024-12-12T16:29:33,691 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/02570d285f264873abef038b5003dd19 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/02570d285f264873abef038b5003dd19 2024-12-12T16:29:33,691 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/65c3711cb5e045a987dc6fdd941f9def to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/65c3711cb5e045a987dc6fdd941f9def 2024-12-12T16:29:33,691 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/cb558f5b89dd42cea2c45bde5e6c2095 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/cb558f5b89dd42cea2c45bde5e6c2095 2024-12-12T16:29:33,691 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/574e65705c014b1d98f7faf354674d02 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/574e65705c014b1d98f7faf354674d02 2024-12-12T16:29:33,691 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/d017e80487c14410aa287adde1be67bd to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/d017e80487c14410aa287adde1be67bd 2024-12-12T16:29:33,691 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/88c05bc586b84d3690901211ca8afb15 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/88c05bc586b84d3690901211ca8afb15 2024-12-12T16:29:33,692 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/6db266ee134d4ed7936c6fb103c60f24 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/6db266ee134d4ed7936c6fb103c60f24 2024-12-12T16:29:33,692 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/92dda5b8cc50420dac0fe372150390f9 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/92dda5b8cc50420dac0fe372150390f9 2024-12-12T16:29:33,692 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/adbee3a52f504d69bcba55d3d63cb1fe to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/adbee3a52f504d69bcba55d3d63cb1fe 2024-12-12T16:29:33,692 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/c94ebf30b9254adf8886fedee04d6716 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/c94ebf30b9254adf8886fedee04d6716 2024-12-12T16:29:33,692 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/565b5157c29e482e81f0c20ae1266c8d to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/565b5157c29e482e81f0c20ae1266c8d 2024-12-12T16:29:33,693 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/9adc64818b73484d9e8cf33ee5ea9d16 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/9adc64818b73484d9e8cf33ee5ea9d16 2024-12-12T16:29:33,693 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/bdaa78cbde55435ab588cdb13bb2b752 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/bdaa78cbde55435ab588cdb13bb2b752 2024-12-12T16:29:33,694 DEBUG [StoreCloser-TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/6a6bebf0bacb472a93e772b4e2a0c190, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/30c5d557a5da49bd81bf329904cec3e5, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/d9fb3df42f254ffba9bed1983a630de8, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/fbf2fef6bcb64c6096226e45cfbb7c32, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/f59f7f5bf6274d7487a5c62d1e52f021, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/fd651e3b5e8e4452b62d0cdf21ffd2cd, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/e7df1806ade34566ad59d7612d32e7db, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/9a0feaf138724e9bb824767b44faf6bb, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/a38f787c0fab4164a2ad4a025167a947, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/2e89038b79e249dea46a2b8f373d3ec3, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/89398b25d08f437eb622252245dad65b, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/0c3c22f5d8a04837856abd6986b9bac5, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/f0df70f440dc45a292651a3cf1b1ef8b, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/8356c5ba0fa5497fb07b710d4de935a5, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/03a9a597c35b4a179b2a19b37ce749c8, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/fe1c48837006493e9d3878bbfdf538ce, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/eb689b7596e74eb7a7dff667b41b46e9, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/385eebed687146d784da6d8b8e0c2207, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/2c14cea1133c4950a50e3f0e76876e68, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/bcc6d544e2e3488ba6291f572e27aa5c, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/7c264fb2514241cf938ef9d1c1d92d3d, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/9f27c6b14f384a188ed4866f90d5f3e9, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/31ef6ed03ac04ddc8182f00996b87a69, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/8c2b919144af4e139511bbc6b95824df, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/23a5e7e347da46dc925f4d75996d9972, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/31202a401aad413ba57f2e5432241857, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/63c54c69a836495382cb27a14aad4ac7, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/6a10303637eb4593b69fcf3a992056da] to archive 2024-12-12T16:29:33,694 DEBUG [StoreCloser-TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
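The archiver entries above and below all perform the same move: a store file under .../data/default/TestAcidGuarantees/<region>/<family>/ is renamed into the mirrored location under .../archive/data/default/TestAcidGuarantees/<region>/<family>/. The following is a minimal, hypothetical sketch of that pattern using the plain Hadoop FileSystem API; the root path, region and file names are placeholders, and this is not HBase's actual HFileArchiver code.

```java
// Illustrative only: mirrors the data/ -> archive/data/ move pattern visible in the
// HFileArchiver log lines above. This is NOT HBase's HFileArchiver implementation.
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveMoveSketch {
  /**
   * Moves one store file from <rootDir>/data/... to <rootDir>/archive/data/...,
   * creating the destination directory first, as the archiver entries show.
   *
   * @param rootDir  the cluster test-data root (placeholder below)
   * @param relative e.g. data/default/TestAcidGuarantees/<region>/B/<hfile>
   */
  static void archiveStoreFile(Configuration conf, Path rootDir, String relative)
      throws IOException {
    FileSystem fs = rootDir.getFileSystem(conf);             // resolve the HDFS filesystem
    Path source = new Path(rootDir, relative);               // .../data/default/...
    Path target = new Path(rootDir, "archive/" + relative);  // .../archive/data/default/...
    fs.mkdirs(target.getParent());                           // ensure the archive dir exists
    if (!fs.rename(source, target)) {                        // metadata-only move on HDFS
      throw new IOException("Failed to archive " + source + " to " + target);
    }
  }

  public static void main(String[] args) throws IOException {
    // All names below are placeholders, not values taken from this log.
    Configuration conf = new Configuration();
    Path root = new Path("hdfs://localhost:45065/user/jenkins/test-data/example-root");
    archiveStoreFile(conf, root,
        "data/default/TestAcidGuarantees/exampleregion/B/examplefile");
  }
}
```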
2024-12-12T16:29:33,696 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/6a6bebf0bacb472a93e772b4e2a0c190 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/6a6bebf0bacb472a93e772b4e2a0c190 2024-12-12T16:29:33,696 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/30c5d557a5da49bd81bf329904cec3e5 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/30c5d557a5da49bd81bf329904cec3e5 2024-12-12T16:29:33,696 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/fbf2fef6bcb64c6096226e45cfbb7c32 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/fbf2fef6bcb64c6096226e45cfbb7c32 2024-12-12T16:29:33,696 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/f59f7f5bf6274d7487a5c62d1e52f021 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/f59f7f5bf6274d7487a5c62d1e52f021 2024-12-12T16:29:33,696 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/d9fb3df42f254ffba9bed1983a630de8 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/d9fb3df42f254ffba9bed1983a630de8 2024-12-12T16:29:33,697 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/fd651e3b5e8e4452b62d0cdf21ffd2cd to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/fd651e3b5e8e4452b62d0cdf21ffd2cd 2024-12-12T16:29:33,697 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/e7df1806ade34566ad59d7612d32e7db to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/e7df1806ade34566ad59d7612d32e7db 2024-12-12T16:29:33,698 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/2e89038b79e249dea46a2b8f373d3ec3 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/2e89038b79e249dea46a2b8f373d3ec3 2024-12-12T16:29:33,698 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/a38f787c0fab4164a2ad4a025167a947 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/a38f787c0fab4164a2ad4a025167a947 2024-12-12T16:29:33,698 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/f0df70f440dc45a292651a3cf1b1ef8b to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/f0df70f440dc45a292651a3cf1b1ef8b 2024-12-12T16:29:33,698 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/0c3c22f5d8a04837856abd6986b9bac5 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/0c3c22f5d8a04837856abd6986b9bac5 2024-12-12T16:29:33,698 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/8356c5ba0fa5497fb07b710d4de935a5 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/8356c5ba0fa5497fb07b710d4de935a5 2024-12-12T16:29:33,696 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/9a0feaf138724e9bb824767b44faf6bb to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/9a0feaf138724e9bb824767b44faf6bb 2024-12-12T16:29:33,699 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/03a9a597c35b4a179b2a19b37ce749c8 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/03a9a597c35b4a179b2a19b37ce749c8 2024-12-12T16:29:33,699 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/fe1c48837006493e9d3878bbfdf538ce to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/fe1c48837006493e9d3878bbfdf538ce 2024-12-12T16:29:33,700 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/385eebed687146d784da6d8b8e0c2207 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/385eebed687146d784da6d8b8e0c2207 2024-12-12T16:29:33,700 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/2c14cea1133c4950a50e3f0e76876e68 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/2c14cea1133c4950a50e3f0e76876e68 2024-12-12T16:29:33,700 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/eb689b7596e74eb7a7dff667b41b46e9 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/eb689b7596e74eb7a7dff667b41b46e9 2024-12-12T16:29:33,700 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/bcc6d544e2e3488ba6291f572e27aa5c to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/bcc6d544e2e3488ba6291f572e27aa5c 2024-12-12T16:29:33,700 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/89398b25d08f437eb622252245dad65b to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/89398b25d08f437eb622252245dad65b 2024-12-12T16:29:33,700 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/7c264fb2514241cf938ef9d1c1d92d3d to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/7c264fb2514241cf938ef9d1c1d92d3d 2024-12-12T16:29:33,700 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/9f27c6b14f384a188ed4866f90d5f3e9 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/9f27c6b14f384a188ed4866f90d5f3e9 2024-12-12T16:29:33,701 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/31ef6ed03ac04ddc8182f00996b87a69 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/31ef6ed03ac04ddc8182f00996b87a69 2024-12-12T16:29:33,701 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/23a5e7e347da46dc925f4d75996d9972 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/23a5e7e347da46dc925f4d75996d9972 2024-12-12T16:29:33,702 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/31202a401aad413ba57f2e5432241857 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/31202a401aad413ba57f2e5432241857 2024-12-12T16:29:33,702 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/8c2b919144af4e139511bbc6b95824df to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/8c2b919144af4e139511bbc6b95824df 2024-12-12T16:29:33,702 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/63c54c69a836495382cb27a14aad4ac7 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/63c54c69a836495382cb27a14aad4ac7 2024-12-12T16:29:33,702 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/6a10303637eb4593b69fcf3a992056da to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/6a10303637eb4593b69fcf3a992056da 2024-12-12T16:29:33,705 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] wal.WALSplitUtil(409): Wrote 
file=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/recovered.edits/440.seqid, newMaxSeqId=440, maxSeqId=1 2024-12-12T16:29:33,705 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6. 2024-12-12T16:29:33,705 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1635): Region close journal for 7f9a08e11e132c4f4473bdc5fef699f6: 2024-12-12T16:29:33,706 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(170): Closed 7f9a08e11e132c4f4473bdc5fef699f6 2024-12-12T16:29:33,707 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=150 updating hbase:meta row=7f9a08e11e132c4f4473bdc5fef699f6, regionState=CLOSED 2024-12-12T16:29:33,708 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=151, resume processing ppid=150 2024-12-12T16:29:33,709 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, ppid=150, state=SUCCESS; CloseRegionProcedure 7f9a08e11e132c4f4473bdc5fef699f6, server=4f6a4780a2f6,41933,1734020809476 in 1.4270 sec 2024-12-12T16:29:33,709 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=150, resume processing ppid=149 2024-12-12T16:29:33,710 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, ppid=149, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=7f9a08e11e132c4f4473bdc5fef699f6, UNASSIGN in 1.4300 sec 2024-12-12T16:29:33,711 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=149, resume processing ppid=148 2024-12-12T16:29:33,711 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, ppid=148, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.4320 sec 2024-12-12T16:29:33,712 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734020973711"}]},"ts":"1734020973711"} 2024-12-12T16:29:33,712 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-12T16:29:33,714 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-12T16:29:33,715 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.4430 sec 2024-12-12T16:29:34,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-12T16:29:34,378 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 148 completed 2024-12-12T16:29:34,378 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-12T16:29:34,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=152, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T16:29:34,380 DEBUG [PEWorker-3 {}] 
procedure.DeleteTableProcedure(103): Waiting for RIT for pid=152, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T16:29:34,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-12T16:29:34,380 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=152, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T16:29:34,382 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6 2024-12-12T16:29:34,384 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A, FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B, FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C, FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/recovered.edits] 2024-12-12T16:29:34,387 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/773c50b9e8a048f98384667eef8f5192 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/773c50b9e8a048f98384667eef8f5192 2024-12-12T16:29:34,387 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/a9e31f4f62fa4402873c8dc728ce4503 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/a9e31f4f62fa4402873c8dc728ce4503 2024-12-12T16:29:34,387 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/9e7aebbb8693425f8e4e05dd9fb2efab to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/A/9e7aebbb8693425f8e4e05dd9fb2efab 2024-12-12T16:29:34,389 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/a057b6e766f74b4f85af3c4caaee2837 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/a057b6e766f74b4f85af3c4caaee2837 2024-12-12T16:29:34,389 DEBUG [HFileArchiver-16 
{}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/c125ca4b74314655ab6aaf29f0b95546 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/c125ca4b74314655ab6aaf29f0b95546 2024-12-12T16:29:34,389 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/d2681672a64e4644b59b1debe00d173c to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/B/d2681672a64e4644b59b1debe00d173c 2024-12-12T16:29:34,392 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/65f6cef135724cb89ddbff9c3cbc5bbe to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/65f6cef135724cb89ddbff9c3cbc5bbe 2024-12-12T16:29:34,392 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/13cb2f5d7fd4471c93ea55afc69be894 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/13cb2f5d7fd4471c93ea55afc69be894 2024-12-12T16:29:34,392 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/d929e30fab214abfb97c09bee6529993 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/C/d929e30fab214abfb97c09bee6529993 2024-12-12T16:29:34,394 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/recovered.edits/440.seqid to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6/recovered.edits/440.seqid 2024-12-12T16:29:34,395 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/7f9a08e11e132c4f4473bdc5fef699f6 2024-12-12T16:29:34,395 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-12T16:29:34,397 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=152, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T16:29:34,398 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees 
from hbase:meta 2024-12-12T16:29:34,401 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-12T16:29:34,402 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=152, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T16:29:34,402 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 2024-12-12T16:29:34,402 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734020974402"}]},"ts":"9223372036854775807"} 2024-12-12T16:29:34,409 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-12T16:29:34,409 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 7f9a08e11e132c4f4473bdc5fef699f6, NAME => 'TestAcidGuarantees,,1734020942532.7f9a08e11e132c4f4473bdc5fef699f6.', STARTKEY => '', ENDKEY => ''}] 2024-12-12T16:29:34,409 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 2024-12-12T16:29:34,409 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734020974409"}]},"ts":"9223372036854775807"} 2024-12-12T16:29:34,411 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-12T16:29:34,416 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=152, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T16:29:34,417 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 38 msec 2024-12-12T16:29:34,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-12T16:29:34,481 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 152 completed 2024-12-12T16:29:34,492 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=244 (was 245), OpenFileDescriptor=445 (was 449), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=377 (was 341) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=7644 (was 7677) 2024-12-12T16:29:34,502 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=244, OpenFileDescriptor=445, MaxFileDescriptor=1048576, SystemLoadAverage=377, ProcessCount=11, AvailableMemoryMB=7644 2024-12-12T16:29:34,503 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
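Between test methods the harness tears the table down: the entries above show the DISABLE procedure (pid=148) completing, followed by a DELETE procedure (pid=152) that archives the region directory and removes TestAcidGuarantees from hbase:meta. A minimal sketch of the equivalent client-side calls through the HBase Admin API follows; the configuration source and error handling are assumptions, not taken from this log.

```java
// Illustrative sketch of the disable + delete sequence the procedures above carry out.
// Connection/quorum settings are placeholders, not values from this log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();  // picks up hbase-site.xml on the classpath
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      if (admin.tableExists(table)) {
        if (admin.isTableEnabled(table)) {
          admin.disableTable(table);   // corresponds to the DisableTableProcedure above
        }
        admin.deleteTable(table);      // corresponds to the DeleteTableProcedure above
      }
    }
  }
}
```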
2024-12-12T16:29:34,503 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T16:29:34,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=153, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-12T16:29:34,505 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=153, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T16:29:34,505 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:29:34,505 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 153 2024-12-12T16:29:34,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-12T16:29:34,506 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=153, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T16:29:34,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742370_1546 (size=963) 2024-12-12T16:29:34,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-12T16:29:34,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-12T16:29:34,912 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc 2024-12-12T16:29:34,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742371_1547 (size=53) 2024-12-12T16:29:35,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-12T16:29:35,317 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T16:29:35,317 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 8e89f008dc04dbad786e718ad49c4912, disabling compactions & flushes 2024-12-12T16:29:35,317 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:35,317 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:35,317 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. after waiting 0 ms 2024-12-12T16:29:35,317 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:35,317 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 
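The CREATE request above defines three column families (A, B, C) with a single version each and sets the table attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'; the deliberately small 128 KB memstore flush size is what triggered the TableDescriptorChecker warning. Below is a hedged sketch of an equivalent client request, assuming the HBase 2.x descriptor builders; every setting not shown is left at its default.

```java
// Hedged sketch of a CreateTable request matching the descriptor logged above:
// three families (A, B, C), one version each, ADAPTIVE compacting memstore,
// and the intentionally tiny flush size that triggers the checker warning.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableDescriptorBuilder builder =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE")
            .setMemStoreFlushSize(131072L);  // 128 KB, intentionally small for the test
    for (String family : new String[] {"A", "B", "C"}) {
      builder.setColumnFamily(
          ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
              .setMaxVersions(1)             // VERSIONS => '1' in the descriptor above
              .build());
    }
    TableDescriptor desc = builder.build();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.createTable(desc);               // drives a CreateTableProcedure as logged above
    }
  }
}
```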
2024-12-12T16:29:35,317 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 8e89f008dc04dbad786e718ad49c4912: 2024-12-12T16:29:35,318 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=153, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T16:29:35,318 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1734020975318"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734020975318"}]},"ts":"1734020975318"} 2024-12-12T16:29:35,319 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-12T16:29:35,320 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=153, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T16:29:35,320 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734020975320"}]},"ts":"1734020975320"} 2024-12-12T16:29:35,320 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-12T16:29:35,324 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=154, ppid=153, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=8e89f008dc04dbad786e718ad49c4912, ASSIGN}] 2024-12-12T16:29:35,325 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=154, ppid=153, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=8e89f008dc04dbad786e718ad49c4912, ASSIGN 2024-12-12T16:29:35,325 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=154, ppid=153, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=8e89f008dc04dbad786e718ad49c4912, ASSIGN; state=OFFLINE, location=4f6a4780a2f6,41933,1734020809476; forceNewPlan=false, retain=false 2024-12-12T16:29:35,476 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=154 updating hbase:meta row=8e89f008dc04dbad786e718ad49c4912, regionState=OPENING, regionLocation=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:35,477 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=155, ppid=154, state=RUNNABLE; OpenRegionProcedure 8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476}] 2024-12-12T16:29:35,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-12T16:29:35,628 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:35,630 INFO [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=155}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 
2024-12-12T16:29:35,631 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=155}] regionserver.HRegion(7285): Opening region: {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} 2024-12-12T16:29:35,631 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=155}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:35,631 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=155}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T16:29:35,631 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=155}] regionserver.HRegion(7327): checking encryption for 8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:35,631 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=155}] regionserver.HRegion(7330): checking classloading for 8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:35,632 INFO [StoreOpener-8e89f008dc04dbad786e718ad49c4912-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:35,633 INFO [StoreOpener-8e89f008dc04dbad786e718ad49c4912-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T16:29:35,633 INFO [StoreOpener-8e89f008dc04dbad786e718ad49c4912-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8e89f008dc04dbad786e718ad49c4912 columnFamilyName A 2024-12-12T16:29:35,633 DEBUG [StoreOpener-8e89f008dc04dbad786e718ad49c4912-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:29:35,634 INFO [StoreOpener-8e89f008dc04dbad786e718ad49c4912-1 {}] regionserver.HStore(327): Store=8e89f008dc04dbad786e718ad49c4912/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T16:29:35,634 INFO [StoreOpener-8e89f008dc04dbad786e718ad49c4912-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:35,635 INFO [StoreOpener-8e89f008dc04dbad786e718ad49c4912-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T16:29:35,635 INFO [StoreOpener-8e89f008dc04dbad786e718ad49c4912-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8e89f008dc04dbad786e718ad49c4912 columnFamilyName B 2024-12-12T16:29:35,635 DEBUG [StoreOpener-8e89f008dc04dbad786e718ad49c4912-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:29:35,635 INFO [StoreOpener-8e89f008dc04dbad786e718ad49c4912-1 {}] regionserver.HStore(327): Store=8e89f008dc04dbad786e718ad49c4912/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T16:29:35,635 INFO [StoreOpener-8e89f008dc04dbad786e718ad49c4912-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:35,636 INFO [StoreOpener-8e89f008dc04dbad786e718ad49c4912-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T16:29:35,636 INFO [StoreOpener-8e89f008dc04dbad786e718ad49c4912-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8e89f008dc04dbad786e718ad49c4912 columnFamilyName C 2024-12-12T16:29:35,636 DEBUG [StoreOpener-8e89f008dc04dbad786e718ad49c4912-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:29:35,637 INFO [StoreOpener-8e89f008dc04dbad786e718ad49c4912-1 {}] regionserver.HStore(327): Store=8e89f008dc04dbad786e718ad49c4912/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T16:29:35,637 INFO [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=155}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:35,638 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=155}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:35,638 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=155}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:35,639 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=155}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-12T16:29:35,640 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=155}] regionserver.HRegion(1085): writing seq id for 8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:35,641 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=155}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T16:29:35,642 INFO [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=155}] regionserver.HRegion(1102): Opened 8e89f008dc04dbad786e718ad49c4912; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72561075, jitterRate=0.08124427497386932}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-12T16:29:35,642 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=155}] regionserver.HRegion(1001): Region open journal for 8e89f008dc04dbad786e718ad49c4912: 2024-12-12T16:29:35,643 INFO [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=155}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912., pid=155, masterSystemTime=1734020975628 2024-12-12T16:29:35,644 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=155}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:35,644 INFO [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=155}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 
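The store-open entries above show each family backed by a CompactingMemStore with an ADAPTIVE compactor, which here comes from the table-level attribute set at creation time. For reference, the same policy can also be requested per column family; the sketch below assumes the HBase 2.x ColumnFamilyDescriptorBuilder API and is not taken from this test.

```java
// Hypothetical per-family alternative to the table-level
// 'hbase.hregion.compacting.memstore.type' attribute seen in this log.
// Assumes the HBase 2.x ColumnFamilyDescriptorBuilder API.
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class AdaptiveFamilySketch {
  static ColumnFamilyDescriptor adaptiveFamily(String name) {
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
        .setMaxVersions(1)
        // Request the ADAPTIVE in-memory compaction policy for this family only.
        .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
        .build();
  }

  public static void main(String[] args) {
    System.out.println(adaptiveFamily("A"));  // prints the resulting family descriptor
  }
}
```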
2024-12-12T16:29:35,644 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=154 updating hbase:meta row=8e89f008dc04dbad786e718ad49c4912, regionState=OPEN, openSeqNum=2, regionLocation=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:35,647 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=155, resume processing ppid=154 2024-12-12T16:29:35,647 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, ppid=154, state=SUCCESS; OpenRegionProcedure 8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 in 168 msec 2024-12-12T16:29:35,648 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=154, resume processing ppid=153 2024-12-12T16:29:35,648 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, ppid=153, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=8e89f008dc04dbad786e718ad49c4912, ASSIGN in 323 msec 2024-12-12T16:29:35,649 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=153, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T16:29:35,649 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734020975649"}]},"ts":"1734020975649"} 2024-12-12T16:29:35,650 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-12T16:29:35,652 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=153, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T16:29:35,653 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1490 sec 2024-12-12T16:29:36,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-12T16:29:36,609 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 153 completed 2024-12-12T16:29:36,610 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x17327621 to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@11a52cdf 2024-12-12T16:29:36,616 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e047c09, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:29:36,617 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:29:36,618 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56804, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:29:36,619 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-12T16:29:36,620 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47292, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-12T16:29:36,621 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-12T16:29:36,622 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T16:29:36,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=156, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-12T16:29:36,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742372_1548 (size=999) 2024-12-12T16:29:37,031 DEBUG [PEWorker-4 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-12T16:29:37,031 INFO [PEWorker-4 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-12T16:29:37,033 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=157, ppid=156, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-12T16:29:37,035 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=158, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=8e89f008dc04dbad786e718ad49c4912, REOPEN/MOVE}] 2024-12-12T16:29:37,035 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=158, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=8e89f008dc04dbad786e718ad49c4912, REOPEN/MOVE 2024-12-12T16:29:37,036 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=158 updating hbase:meta row=8e89f008dc04dbad786e718ad49c4912, regionState=CLOSING, regionLocation=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:37,036 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T16:29:37,037 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=159, ppid=158, state=RUNNABLE; CloseRegionProcedure 8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476}] 2024-12-12T16:29:37,188 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:37,188 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] handler.UnassignRegionHandler(124): Close 8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:37,188 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T16:29:37,188 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1681): Closing 8e89f008dc04dbad786e718ad49c4912, disabling compactions & flushes 2024-12-12T16:29:37,188 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:37,188 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:37,188 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. after waiting 0 ms 2024-12-12T16:29:37,188 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 
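[Annotation] The modify-table request logged above switches column family 'A' to a MOB-enabled family (IS_MOB => 'true', MOB_THRESHOLD => '4'), and the master applies it by reopening the region (ModifyTableProcedure pid=156 -> ReopenTableRegionsProcedure pid=157 -> TransitRegionStateProcedure pid=158 REOPEN/MOVE). Below is a minimal, hedged sketch of how such a change is typically issued through the HBase Java Admin API; the class name and connection setup are illustrative and not taken from this test.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class EnableMobOnFamilyA {  // hypothetical class name
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptor current = admin.getDescriptor(table);
          // Rebuild family 'A' with MOB enabled and a 4-byte threshold, mirroring the
          // descriptor change in the ModifyTableProcedure above; B and C are untouched.
          TableDescriptor modified = TableDescriptorBuilder.newBuilder(current)
              .modifyColumnFamily(ColumnFamilyDescriptorBuilder
                  .newBuilder(current.getColumnFamily(Bytes.toBytes("A")))
                  .setMobEnabled(true)
                  .setMobThreshold(4L)
                  .build())
              .build();
          // modifyTable drives the master-side procedure chain seen in the log:
          // ModifyTableProcedure -> ReopenTableRegionsProcedure -> region REOPEN/MOVE.
          admin.modifyTable(modified);
        }
      }
    }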
2024-12-12T16:29:37,191 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-12T16:29:37,192 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:37,192 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1635): Region close journal for 8e89f008dc04dbad786e718ad49c4912: 2024-12-12T16:29:37,192 WARN [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegionServer(3786): Not adding moved region record: 8e89f008dc04dbad786e718ad49c4912 to self. 2024-12-12T16:29:37,193 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] handler.UnassignRegionHandler(170): Closed 8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:37,193 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=158 updating hbase:meta row=8e89f008dc04dbad786e718ad49c4912, regionState=CLOSED 2024-12-12T16:29:37,195 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=159, resume processing ppid=158 2024-12-12T16:29:37,195 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, ppid=158, state=SUCCESS; CloseRegionProcedure 8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 in 158 msec 2024-12-12T16:29:37,196 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=158, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=8e89f008dc04dbad786e718ad49c4912, REOPEN/MOVE; state=CLOSED, location=4f6a4780a2f6,41933,1734020809476; forceNewPlan=false, retain=true 2024-12-12T16:29:37,346 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=158 updating hbase:meta row=8e89f008dc04dbad786e718ad49c4912, regionState=OPENING, regionLocation=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:37,347 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=160, ppid=158, state=RUNNABLE; OpenRegionProcedure 8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476}] 2024-12-12T16:29:37,499 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:37,501 INFO [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 
2024-12-12T16:29:37,501 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(7285): Opening region: {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} 2024-12-12T16:29:37,501 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:37,501 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T16:29:37,501 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(7327): checking encryption for 8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:37,501 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(7330): checking classloading for 8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:37,502 INFO [StoreOpener-8e89f008dc04dbad786e718ad49c4912-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:37,503 INFO [StoreOpener-8e89f008dc04dbad786e718ad49c4912-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T16:29:37,503 INFO [StoreOpener-8e89f008dc04dbad786e718ad49c4912-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8e89f008dc04dbad786e718ad49c4912 columnFamilyName A 2024-12-12T16:29:37,504 DEBUG [StoreOpener-8e89f008dc04dbad786e718ad49c4912-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:29:37,505 INFO [StoreOpener-8e89f008dc04dbad786e718ad49c4912-1 {}] regionserver.HStore(327): Store=8e89f008dc04dbad786e718ad49c4912/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T16:29:37,505 INFO [StoreOpener-8e89f008dc04dbad786e718ad49c4912-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:37,505 INFO [StoreOpener-8e89f008dc04dbad786e718ad49c4912-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T16:29:37,505 INFO [StoreOpener-8e89f008dc04dbad786e718ad49c4912-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8e89f008dc04dbad786e718ad49c4912 columnFamilyName B 2024-12-12T16:29:37,505 DEBUG [StoreOpener-8e89f008dc04dbad786e718ad49c4912-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:29:37,506 INFO [StoreOpener-8e89f008dc04dbad786e718ad49c4912-1 {}] regionserver.HStore(327): Store=8e89f008dc04dbad786e718ad49c4912/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T16:29:37,506 INFO [StoreOpener-8e89f008dc04dbad786e718ad49c4912-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:37,506 INFO [StoreOpener-8e89f008dc04dbad786e718ad49c4912-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T16:29:37,506 INFO [StoreOpener-8e89f008dc04dbad786e718ad49c4912-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8e89f008dc04dbad786e718ad49c4912 columnFamilyName C 2024-12-12T16:29:37,506 DEBUG [StoreOpener-8e89f008dc04dbad786e718ad49c4912-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:29:37,506 INFO [StoreOpener-8e89f008dc04dbad786e718ad49c4912-1 {}] regionserver.HStore(327): Store=8e89f008dc04dbad786e718ad49c4912/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T16:29:37,507 INFO [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:37,507 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:37,508 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:37,509 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-12T16:29:37,510 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1085): writing seq id for 8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:37,510 INFO [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1102): Opened 8e89f008dc04dbad786e718ad49c4912; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60315598, jitterRate=-0.10122755169868469}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-12T16:29:37,511 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1001): Region open journal for 8e89f008dc04dbad786e718ad49c4912: 2024-12-12T16:29:37,511 INFO [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912., pid=160, masterSystemTime=1734020977498 2024-12-12T16:29:37,512 DEBUG [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:37,512 INFO [RS_OPEN_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 
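[Annotation] The StoreOpener lines above show each family coming back up with a CompactingMemStore in ADAPTIVE mode (in-memory flush size threshold 2.00 MB, pipelineThreshold=2), which follows from the table attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' in the descriptor. A hedged sketch of setting that table-level metadata key via the Admin API is shown below; names are illustrative, assuming an existing table.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class SetAdaptiveInMemoryCompaction {  // hypothetical class name
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Carry the existing schema forward and override only the table-level
          // metadata key that selects the in-memory compaction policy.
          admin.modifyTable(TableDescriptorBuilder
              .newBuilder(admin.getDescriptor(table))
              .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE")
              .build());
        }
      }
    }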
2024-12-12T16:29:37,513 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=158 updating hbase:meta row=8e89f008dc04dbad786e718ad49c4912, regionState=OPEN, openSeqNum=5, regionLocation=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:37,514 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=160, resume processing ppid=158 2024-12-12T16:29:37,514 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, ppid=158, state=SUCCESS; OpenRegionProcedure 8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 in 166 msec 2024-12-12T16:29:37,515 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=158, resume processing ppid=157 2024-12-12T16:29:37,515 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, ppid=157, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=8e89f008dc04dbad786e718ad49c4912, REOPEN/MOVE in 480 msec 2024-12-12T16:29:37,517 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=157, resume processing ppid=156 2024-12-12T16:29:37,517 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, ppid=156, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 483 msec 2024-12-12T16:29:37,518 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 895 msec 2024-12-12T16:29:37,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=156 2024-12-12T16:29:37,520 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1584f18a to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2d7fe431 2024-12-12T16:29:37,523 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@60d631a3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:29:37,525 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5b914bf4 to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@91d72db 2024-12-12T16:29:37,527 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58971172, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:29:37,528 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3f6a59e4 to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5d836f78 2024-12-12T16:29:37,533 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d7fe93b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:29:37,534 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x150e08ed 
to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@53305d9b 2024-12-12T16:29:37,538 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11c440f7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:29:37,539 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3a3b66d3 to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6bb6288a 2024-12-12T16:29:37,541 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58460ef3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:29:37,542 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x06556601 to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6e8cd1ae 2024-12-12T16:29:37,546 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5bb75907, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:29:37,546 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x458a85fd to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4d832d43 2024-12-12T16:29:37,549 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c1d3a95, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:29:37,550 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x410bf0c8 to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@15b6349f 2024-12-12T16:29:37,554 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@503a7d2e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:29:37,554 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x67adb273 to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@439b60d5 2024-12-12T16:29:37,557 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@404bb685, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:29:37,557 DEBUG [Time-limited 
test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x474dec36 to 127.0.0.1:52684 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5f48b1c2 2024-12-12T16:29:37,561 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@42aacb30, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T16:29:37,565 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T16:29:37,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=161, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees 2024-12-12T16:29:37,566 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=161, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T16:29:37,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-12T16:29:37,567 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=161, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T16:29:37,567 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=162, ppid=161, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T16:29:37,568 DEBUG [hconnection-0x2f821463-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:29:37,569 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56814, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:29:37,572 DEBUG [hconnection-0x70eb0531-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:29:37,573 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56826, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:29:37,581 DEBUG [hconnection-0x59e02ba0-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:29:37,582 DEBUG [hconnection-0x12f30586-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:29:37,583 DEBUG [hconnection-0x2a3faba5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:29:37,583 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56836, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:29:37,583 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56832, version=2.7.0-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:29:37,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:37,584 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56852, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:29:37,584 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8e89f008dc04dbad786e718ad49c4912 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T16:29:37,584 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=A 2024-12-12T16:29:37,584 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:37,584 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=B 2024-12-12T16:29:37,584 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:37,584 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=C 2024-12-12T16:29:37,584 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:37,585 DEBUG [hconnection-0x4d4966ac-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:29:37,586 DEBUG [hconnection-0x3bdf662e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:29:37,587 DEBUG [hconnection-0x267aff80-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:29:37,588 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56864, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:29:37,589 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56884, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:29:37,592 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56874, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:29:37,604 DEBUG [hconnection-0x12dc2f25-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:29:37,605 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56886, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:29:37,606 DEBUG [hconnection-0x5905e3db-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T16:29:37,607 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56898, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T16:29:37,612 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212838130139f6447499b4fbafdbdec3664_8e89f008dc04dbad786e718ad49c4912 is 50, key is test_row_0/A:col10/1734020977580/Put/seqid=0 2024-12-12T16:29:37,614 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:37,614 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:37,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56884 deadline: 1734021037611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:37,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56832 deadline: 1734021037613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:37,615 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:37,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 3 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56886 deadline: 1734021037613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:37,616 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:37,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56864 deadline: 1734021037614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:37,617 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:37,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1734021037615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:37,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742373_1549 (size=12154) 2024-12-12T16:29:37,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-12T16:29:37,717 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:37,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56832 deadline: 1734021037716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:37,717 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:37,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56884 deadline: 1734021037716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:37,718 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:37,718 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-12-12T16:29:37,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:37,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. as already flushing 2024-12-12T16:29:37,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:37,719 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:37,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:37,719 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:37,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56864 deadline: 1734021037717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:37,719 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:37,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1734021037718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:37,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] 
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:37,721 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:37,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56886 deadline: 1734021037720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:37,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-12T16:29:37,871 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:37,871 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-12-12T16:29:37,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:37,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. as already flushing 2024-12-12T16:29:37,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:37,872 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:29:37,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:37,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:37,919 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:37,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56832 deadline: 1734021037918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:37,920 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:37,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56884 deadline: 1734021037919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:37,922 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:37,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1734021037921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:37,923 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:37,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56886 deadline: 1734021037923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:37,925 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:37,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56864 deadline: 1734021037924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:38,024 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:38,025 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-12-12T16:29:38,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:38,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. as already flushing 2024-12-12T16:29:38,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:38,025 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:29:38,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:38,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:38,033 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:29:38,040 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212838130139f6447499b4fbafdbdec3664_8e89f008dc04dbad786e718ad49c4912 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212838130139f6447499b4fbafdbdec3664_8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:38,041 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/7b4f3d3543974dc890b6ad04694ed863, store: [table=TestAcidGuarantees family=A region=8e89f008dc04dbad786e718ad49c4912] 2024-12-12T16:29:38,041 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/7b4f3d3543974dc890b6ad04694ed863 is 175, key is test_row_0/A:col10/1734020977580/Put/seqid=0 2024-12-12T16:29:38,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742374_1550 (size=30955) 2024-12-12T16:29:38,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-12T16:29:38,177 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:38,177 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-12-12T16:29:38,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:38,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. as already flushing 2024-12-12T16:29:38,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 
2024-12-12T16:29:38,178 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:38,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:38,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:38,222 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:38,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56832 deadline: 1734021038221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:38,222 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:38,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56884 deadline: 1734021038221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:38,226 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:38,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1734021038225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:38,226 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:38,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56886 deadline: 1734021038225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:38,227 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:38,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56864 deadline: 1734021038226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:38,330 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:38,330 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-12-12T16:29:38,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:38,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. as already flushing 2024-12-12T16:29:38,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:38,330 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:38,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:38,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:38,445 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=15, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/7b4f3d3543974dc890b6ad04694ed863 2024-12-12T16:29:38,468 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/eb87795c7e204c6eb70a48178eb3ccf6 is 50, key is test_row_0/B:col10/1734020977580/Put/seqid=0 2024-12-12T16:29:38,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742375_1551 (size=12001) 2024-12-12T16:29:38,483 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:38,483 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-12-12T16:29:38,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:38,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 
as already flushing 2024-12-12T16:29:38,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:38,483 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:38,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:38,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:38,635 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:38,636 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-12-12T16:29:38,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:38,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. as already flushing 2024-12-12T16:29:38,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:38,636 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:38,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:38,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:38,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-12T16:29:38,725 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:38,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56832 deadline: 1734021038724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:38,728 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:38,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56884 deadline: 1734021038727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:38,730 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:38,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56864 deadline: 1734021038729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:38,731 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:38,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56886 deadline: 1734021038731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:38,732 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:38,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1734021038731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:38,788 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:38,789 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-12-12T16:29:38,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:38,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. as already flushing 2024-12-12T16:29:38,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:38,789 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:38,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:38,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:38,872 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/eb87795c7e204c6eb70a48178eb3ccf6 2024-12-12T16:29:38,896 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/8ea123623d854fabb750f70bad5bc6e9 is 50, key is test_row_0/C:col10/1734020977580/Put/seqid=0 2024-12-12T16:29:38,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742376_1552 (size=12001) 2024-12-12T16:29:38,901 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/8ea123623d854fabb750f70bad5bc6e9 2024-12-12T16:29:38,907 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/7b4f3d3543974dc890b6ad04694ed863 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/7b4f3d3543974dc890b6ad04694ed863 2024-12-12T16:29:38,911 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/7b4f3d3543974dc890b6ad04694ed863, entries=150, sequenceid=15, filesize=30.2 K 2024-12-12T16:29:38,911 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/eb87795c7e204c6eb70a48178eb3ccf6 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/eb87795c7e204c6eb70a48178eb3ccf6 2024-12-12T16:29:38,914 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/eb87795c7e204c6eb70a48178eb3ccf6, entries=150, sequenceid=15, filesize=11.7 K 2024-12-12T16:29:38,915 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/8ea123623d854fabb750f70bad5bc6e9 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/8ea123623d854fabb750f70bad5bc6e9 2024-12-12T16:29:38,918 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/8ea123623d854fabb750f70bad5bc6e9, entries=150, sequenceid=15, filesize=11.7 K 2024-12-12T16:29:38,919 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 8e89f008dc04dbad786e718ad49c4912 in 1335ms, sequenceid=15, compaction requested=false 2024-12-12T16:29:38,919 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-12T16:29:38,920 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8e89f008dc04dbad786e718ad49c4912: 2024-12-12T16:29:38,941 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:38,941 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-12-12T16:29:38,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 
2024-12-12T16:29:38,942 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2837): Flushing 8e89f008dc04dbad786e718ad49c4912 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-12T16:29:38,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=A 2024-12-12T16:29:38,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:38,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=B 2024-12-12T16:29:38,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:38,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=C 2024-12-12T16:29:38,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:38,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212121c4ef367884d2bb57892ede3b8fa10_8e89f008dc04dbad786e718ad49c4912 is 50, key is test_row_0/A:col10/1734020977610/Put/seqid=0 2024-12-12T16:29:38,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742377_1553 (size=12154) 2024-12-12T16:29:39,228 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-12T16:29:39,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:29:39,359 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212121c4ef367884d2bb57892ede3b8fa10_8e89f008dc04dbad786e718ad49c4912 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212121c4ef367884d2bb57892ede3b8fa10_8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:39,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/b5695cd415bc453496f96202b2b8aea4, store: [table=TestAcidGuarantees family=A region=8e89f008dc04dbad786e718ad49c4912] 2024-12-12T16:29:39,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/b5695cd415bc453496f96202b2b8aea4 is 175, key is test_row_0/A:col10/1734020977610/Put/seqid=0 2024-12-12T16:29:39,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742378_1554 (size=30955) 2024-12-12T16:29:39,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-12T16:29:39,728 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. as already flushing 2024-12-12T16:29:39,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:39,741 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:39,741 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:39,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1734021039739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:39,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56884 deadline: 1734021039738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:39,741 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:39,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56864 deadline: 1734021039739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:39,743 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:39,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56886 deadline: 1734021039740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:39,743 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:39,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56832 deadline: 1734021039740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:39,764 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/b5695cd415bc453496f96202b2b8aea4 2024-12-12T16:29:39,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/bcf68fa73d154b309e3ea567efc3aa44 is 50, key is test_row_0/B:col10/1734020977610/Put/seqid=0 2024-12-12T16:29:39,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742379_1555 (size=12001) 2024-12-12T16:29:39,795 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/bcf68fa73d154b309e3ea567efc3aa44 2024-12-12T16:29:39,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/269d6a6beeb949308494961a577e9d23 is 50, key is test_row_0/C:col10/1734020977610/Put/seqid=0 2024-12-12T16:29:39,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742380_1556 (size=12001) 2024-12-12T16:29:39,839 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/269d6a6beeb949308494961a577e9d23 2024-12-12T16:29:39,843 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:39,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56864 deadline: 1734021039842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:39,844 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:39,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56832 deadline: 1734021039844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:39,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/b5695cd415bc453496f96202b2b8aea4 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/b5695cd415bc453496f96202b2b8aea4 2024-12-12T16:29:39,851 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/b5695cd415bc453496f96202b2b8aea4, entries=150, sequenceid=41, filesize=30.2 K 2024-12-12T16:29:39,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/bcf68fa73d154b309e3ea567efc3aa44 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/bcf68fa73d154b309e3ea567efc3aa44 2024-12-12T16:29:39,855 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/bcf68fa73d154b309e3ea567efc3aa44, entries=150, sequenceid=41, filesize=11.7 K 2024-12-12T16:29:39,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/269d6a6beeb949308494961a577e9d23 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/269d6a6beeb949308494961a577e9d23 2024-12-12T16:29:39,862 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): 
Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/269d6a6beeb949308494961a577e9d23, entries=150, sequenceid=41, filesize=11.7 K 2024-12-12T16:29:39,863 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 8e89f008dc04dbad786e718ad49c4912 in 921ms, sequenceid=41, compaction requested=false 2024-12-12T16:29:39,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2538): Flush status journal for 8e89f008dc04dbad786e718ad49c4912: 2024-12-12T16:29:39,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:39,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=162 2024-12-12T16:29:39,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4106): Remote procedure done, pid=162 2024-12-12T16:29:39,866 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=162, resume processing ppid=161 2024-12-12T16:29:39,867 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, ppid=161, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2980 sec 2024-12-12T16:29:39,869 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees in 2.3020 sec 2024-12-12T16:29:40,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:40,046 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8e89f008dc04dbad786e718ad49c4912 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T16:29:40,046 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=A 2024-12-12T16:29:40,046 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:40,046 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=B 2024-12-12T16:29:40,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:40,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=C 2024-12-12T16:29:40,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:40,053 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121210bfd24428044411acc5a418de51b34e_8e89f008dc04dbad786e718ad49c4912 is 50, key is test_row_0/A:col10/1734020979736/Put/seqid=0 
2024-12-12T16:29:40,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742381_1557 (size=12154) 2024-12-12T16:29:40,106 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:40,106 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:40,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56864 deadline: 1734021040104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:40,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56832 deadline: 1734021040103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:40,208 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:40,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56832 deadline: 1734021040207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:40,209 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:40,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56864 deadline: 1734021040207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:40,411 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:40,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56832 deadline: 1734021040410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:40,412 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:40,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56864 deadline: 1734021040411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:40,457 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:29:40,461 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121210bfd24428044411acc5a418de51b34e_8e89f008dc04dbad786e718ad49c4912 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121210bfd24428044411acc5a418de51b34e_8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:40,462 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/924ebe969c9841dc8a83f03efb333dca, store: [table=TestAcidGuarantees family=A region=8e89f008dc04dbad786e718ad49c4912] 2024-12-12T16:29:40,463 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/924ebe969c9841dc8a83f03efb333dca is 175, key is test_row_0/A:col10/1734020979736/Put/seqid=0 2024-12-12T16:29:40,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742382_1558 (size=30955) 2024-12-12T16:29:40,468 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=52, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/924ebe969c9841dc8a83f03efb333dca 2024-12-12T16:29:40,477 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/70ff85e9604e40ea90a0f90722861dc7 is 50, key is test_row_0/B:col10/1734020979736/Put/seqid=0 2024-12-12T16:29:40,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742383_1559 
(size=12001) 2024-12-12T16:29:40,482 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/70ff85e9604e40ea90a0f90722861dc7 2024-12-12T16:29:40,488 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/28340b73d061408cb579aa75ca1f353c is 50, key is test_row_0/C:col10/1734020979736/Put/seqid=0 2024-12-12T16:29:40,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742384_1560 (size=12001) 2024-12-12T16:29:40,492 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/28340b73d061408cb579aa75ca1f353c 2024-12-12T16:29:40,496 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/924ebe969c9841dc8a83f03efb333dca as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/924ebe969c9841dc8a83f03efb333dca 2024-12-12T16:29:40,499 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/924ebe969c9841dc8a83f03efb333dca, entries=150, sequenceid=52, filesize=30.2 K 2024-12-12T16:29:40,500 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/70ff85e9604e40ea90a0f90722861dc7 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/70ff85e9604e40ea90a0f90722861dc7 2024-12-12T16:29:40,503 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/70ff85e9604e40ea90a0f90722861dc7, entries=150, sequenceid=52, filesize=11.7 K 2024-12-12T16:29:40,503 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/28340b73d061408cb579aa75ca1f353c as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/28340b73d061408cb579aa75ca1f353c 2024-12-12T16:29:40,507 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/28340b73d061408cb579aa75ca1f353c, entries=150, sequenceid=52, filesize=11.7 K 2024-12-12T16:29:40,507 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 8e89f008dc04dbad786e718ad49c4912 in 461ms, sequenceid=52, compaction requested=true 2024-12-12T16:29:40,507 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8e89f008dc04dbad786e718ad49c4912: 2024-12-12T16:29:40,508 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:29:40,508 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8e89f008dc04dbad786e718ad49c4912:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T16:29:40,508 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:40,508 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:29:40,508 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8e89f008dc04dbad786e718ad49c4912:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T16:29:40,508 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:40,508 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8e89f008dc04dbad786e718ad49c4912:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T16:29:40,508 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:29:40,512 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92865 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:29:40,512 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): 8e89f008dc04dbad786e718ad49c4912/A is initiating minor compaction (all files) 2024-12-12T16:29:40,512 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8e89f008dc04dbad786e718ad49c4912/A in TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 
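The repeated "Over memstore limit=512.0 K" rejections and the back-to-back flushes above come from the region's blocking memstore size, which HBase derives as hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. A 512 K limit is consistent with a roughly 128 K flush size and the default multiplier of 4; the exact values this test harness uses are not visible in the excerpt, so the numbers in the following minimal sketch are assumptions.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Blocking limit = flush.size * block.multiplier.
            // 128 K * 4 = 512 K, matching the "Over memstore limit=512.0 K" messages above.
            // Both values here are illustrative, not taken from the test.
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
                    * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
            System.out.println("blocking memstore limit (bytes) = " + blockingLimit);
        }
    }

Once a region's memstore grows past that product, HRegion.checkResources (visible in the stack traces above) rejects further mutations with RegionTooBusyException until a flush like the ones logged here brings the size back down.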
2024-12-12T16:29:40,512 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/7b4f3d3543974dc890b6ad04694ed863, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/b5695cd415bc453496f96202b2b8aea4, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/924ebe969c9841dc8a83f03efb333dca] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp, totalSize=90.7 K 2024-12-12T16:29:40,512 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:40,512 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. files: [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/7b4f3d3543974dc890b6ad04694ed863, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/b5695cd415bc453496f96202b2b8aea4, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/924ebe969c9841dc8a83f03efb333dca] 2024-12-12T16:29:40,513 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:29:40,513 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): 8e89f008dc04dbad786e718ad49c4912/B is initiating minor compaction (all files) 2024-12-12T16:29:40,513 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8e89f008dc04dbad786e718ad49c4912/B in TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 
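The HMobStore, DefaultMobStoreFlusher and DefaultMobStoreCompactor entries above indicate that column family A of TestAcidGuarantees is MOB-enabled, which is why its flushes write an extra MOB file under mobdir before committing the regular store file. A minimal sketch of how such a table can be declared with the 2.x descriptor builders follows; the MOB threshold and the plain B/C families are illustrative assumptions, not the test's actual setup.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobTableSketch {
        public static void main(String[] args) throws IOException {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                TableDescriptorBuilder table =
                        TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
                // Values in family A larger than the threshold go to MOB files, which is
                // what routes A's flushes and compactions through HMobStore as logged above.
                table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                        .setMobEnabled(true)
                        .setMobThreshold(100L) // illustrative threshold
                        .build());
                table.setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"));
                table.setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"));
                admin.createTable(table.build());
            }
        }
    }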
2024-12-12T16:29:40,513 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/eb87795c7e204c6eb70a48178eb3ccf6, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/bcf68fa73d154b309e3ea567efc3aa44, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/70ff85e9604e40ea90a0f90722861dc7] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp, totalSize=35.2 K 2024-12-12T16:29:40,513 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7b4f3d3543974dc890b6ad04694ed863, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1734020977580 2024-12-12T16:29:40,513 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting eb87795c7e204c6eb70a48178eb3ccf6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1734020977580 2024-12-12T16:29:40,514 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting b5695cd415bc453496f96202b2b8aea4, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1734020977610 2024-12-12T16:29:40,514 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting bcf68fa73d154b309e3ea567efc3aa44, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1734020977610 2024-12-12T16:29:40,514 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 924ebe969c9841dc8a83f03efb333dca, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1734020979736 2024-12-12T16:29:40,514 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 70ff85e9604e40ea90a0f90722861dc7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1734020979736 2024-12-12T16:29:40,522 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8e89f008dc04dbad786e718ad49c4912#B#compaction#479 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:29:40,523 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/0f93facd007c4284b26a4eedff2829cc is 50, key is test_row_0/B:col10/1734020979736/Put/seqid=0 2024-12-12T16:29:40,524 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=8e89f008dc04dbad786e718ad49c4912] 2024-12-12T16:29:40,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742385_1561 (size=12104) 2024-12-12T16:29:40,536 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241212c083012d42434cafa6194434f11b5994_8e89f008dc04dbad786e718ad49c4912 store=[table=TestAcidGuarantees family=A region=8e89f008dc04dbad786e718ad49c4912] 2024-12-12T16:29:40,539 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241212c083012d42434cafa6194434f11b5994_8e89f008dc04dbad786e718ad49c4912, store=[table=TestAcidGuarantees family=A region=8e89f008dc04dbad786e718ad49c4912] 2024-12-12T16:29:40,539 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212c083012d42434cafa6194434f11b5994_8e89f008dc04dbad786e718ad49c4912 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=8e89f008dc04dbad786e718ad49c4912] 2024-12-12T16:29:40,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742386_1562 (size=4469) 2024-12-12T16:29:40,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:40,717 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8e89f008dc04dbad786e718ad49c4912 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-12T16:29:40,718 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=A 2024-12-12T16:29:40,718 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:40,718 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=B 2024-12-12T16:29:40,718 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:40,718 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=C 2024-12-12T16:29:40,718 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:40,727 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the 
biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412123b1288a21cc348f787422d0215668a9a_8e89f008dc04dbad786e718ad49c4912 is 50, key is test_row_0/A:col10/1734020980715/Put/seqid=0 2024-12-12T16:29:40,728 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:40,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56832 deadline: 1734021040726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:40,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742387_1563 (size=12154) 2024-12-12T16:29:40,731 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:40,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56864 deadline: 1734021040728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:40,732 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:29:40,736 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412123b1288a21cc348f787422d0215668a9a_8e89f008dc04dbad786e718ad49c4912 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412123b1288a21cc348f787422d0215668a9a_8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:40,736 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/1cda643a71cb447097904fd5ccc4464c, store: [table=TestAcidGuarantees family=A region=8e89f008dc04dbad786e718ad49c4912] 2024-12-12T16:29:40,737 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/1cda643a71cb447097904fd5ccc4464c is 175, key is test_row_0/A:col10/1734020980715/Put/seqid=0 2024-12-12T16:29:40,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742388_1564 (size=30955) 2024-12-12T16:29:40,831 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:40,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56832 deadline: 1734021040829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:40,834 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:40,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56864 deadline: 1734021040832, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:40,937 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/0f93facd007c4284b26a4eedff2829cc as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/0f93facd007c4284b26a4eedff2829cc 2024-12-12T16:29:40,940 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8e89f008dc04dbad786e718ad49c4912/B of 8e89f008dc04dbad786e718ad49c4912 into 0f93facd007c4284b26a4eedff2829cc(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
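The Mutate calls rejected above (callId 45, 47, 48, 50, ...) are ordinary client puts against row test_row_0; the HBase client treats RegionTooBusyException as retriable, backs off, and re-sends, which is why the same connections keep reappearing with later deadlines. Only after the retry budget is spent does the failure surface to application code, typically wrapped in another IOException. A minimal sketch of such a writer follows; the retry settings, value sizes and error handling are illustrative assumptions, not the test's code.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionWriterSketch {
        public static void main(String[] args) throws IOException {
            Configuration conf = HBaseConfiguration.create();
            // Shorter retry budget so a persistently busy region fails fast (illustrative values).
            conf.setInt("hbase.client.retries.number", 5);
            conf.setLong("hbase.client.pause", 100L);
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), new byte[100]);
                put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), new byte[10]);
                put.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col10"), new byte[10]);
                try {
                    // Retried internally; a server-side RegionTooBusyException only
                    // reaches this catch block once the client gives up.
                    table.put(put);
                } catch (IOException e) {
                    System.err.println("put failed after retries: " + e);
                }
            }
        }
    }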
2024-12-12T16:29:40,940 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8e89f008dc04dbad786e718ad49c4912: 2024-12-12T16:29:40,940 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912., storeName=8e89f008dc04dbad786e718ad49c4912/B, priority=13, startTime=1734020980508; duration=0sec 2024-12-12T16:29:40,941 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:29:40,941 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e89f008dc04dbad786e718ad49c4912:B 2024-12-12T16:29:40,941 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:29:40,941 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:29:40,942 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): 8e89f008dc04dbad786e718ad49c4912/C is initiating minor compaction (all files) 2024-12-12T16:29:40,942 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8e89f008dc04dbad786e718ad49c4912/C in TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:40,942 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/8ea123623d854fabb750f70bad5bc6e9, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/269d6a6beeb949308494961a577e9d23, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/28340b73d061408cb579aa75ca1f353c] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp, totalSize=35.2 K 2024-12-12T16:29:40,942 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 8ea123623d854fabb750f70bad5bc6e9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1734020977580 2024-12-12T16:29:40,942 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 269d6a6beeb949308494961a577e9d23, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1734020977610 2024-12-12T16:29:40,943 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 28340b73d061408cb579aa75ca1f353c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1734020979736 2024-12-12T16:29:40,947 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
8e89f008dc04dbad786e718ad49c4912#A#compaction#480 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:29:40,947 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/7537cb8ddc66407181f1c7eca83d7f25 is 175, key is test_row_0/A:col10/1734020979736/Put/seqid=0 2024-12-12T16:29:40,948 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8e89f008dc04dbad786e718ad49c4912#C#compaction#482 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:29:40,949 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/151682da3b014cd5b3f5e8dbca5f8ba1 is 50, key is test_row_0/C:col10/1734020979736/Put/seqid=0 2024-12-12T16:29:40,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742389_1565 (size=31058) 2024-12-12T16:29:40,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742390_1566 (size=12104) 2024-12-12T16:29:40,956 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/7537cb8ddc66407181f1c7eca83d7f25 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/7537cb8ddc66407181f1c7eca83d7f25 2024-12-12T16:29:40,958 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/151682da3b014cd5b3f5e8dbca5f8ba1 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/151682da3b014cd5b3f5e8dbca5f8ba1 2024-12-12T16:29:40,960 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8e89f008dc04dbad786e718ad49c4912/A of 8e89f008dc04dbad786e718ad49c4912 into 7537cb8ddc66407181f1c7eca83d7f25(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
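The A and C compactions above were selected automatically by the region server once three store files had accumulated, whereas the FLUSH procedures that appear further down in this excerpt (procId 161 and 163 for default:TestAcidGuarantees) are driven through the Admin API. A minimal sketch of issuing those operations explicitly follows; it shows the general client API and is not necessarily how the test triggers them.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushAndCompactSketch {
        public static void main(String[] args) throws IOException {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                TableName tn = TableName.valueOf("TestAcidGuarantees");
                admin.flush(tn);        // same operation as the FlushTableProcedure entries below
                admin.compact(tn);      // queue a (minor) compaction, like the system-requested ones above
                admin.majorCompact(tn); // or force a major compaction of all store files
            }
        }
    }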
2024-12-12T16:29:40,961 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8e89f008dc04dbad786e718ad49c4912: 2024-12-12T16:29:40,961 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912., storeName=8e89f008dc04dbad786e718ad49c4912/A, priority=13, startTime=1734020980508; duration=0sec 2024-12-12T16:29:40,961 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:40,961 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e89f008dc04dbad786e718ad49c4912:A 2024-12-12T16:29:40,962 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8e89f008dc04dbad786e718ad49c4912/C of 8e89f008dc04dbad786e718ad49c4912 into 151682da3b014cd5b3f5e8dbca5f8ba1(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:29:40,962 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8e89f008dc04dbad786e718ad49c4912: 2024-12-12T16:29:40,962 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912., storeName=8e89f008dc04dbad786e718ad49c4912/C, priority=13, startTime=1734020980508; duration=0sec 2024-12-12T16:29:40,962 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:40,962 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e89f008dc04dbad786e718ad49c4912:C 2024-12-12T16:29:41,033 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:41,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56832 deadline: 1734021041032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:41,037 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:41,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56864 deadline: 1734021041036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:41,142 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=79, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/1cda643a71cb447097904fd5ccc4464c 2024-12-12T16:29:41,148 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/5a8615a7661e4470bea02cc74a202bcf is 50, key is test_row_0/B:col10/1734020980715/Put/seqid=0 2024-12-12T16:29:41,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742391_1567 (size=12001) 2024-12-12T16:29:41,153 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/5a8615a7661e4470bea02cc74a202bcf 2024-12-12T16:29:41,159 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/52a33969a9094e9bad075fa208c29051 is 50, key is test_row_0/C:col10/1734020980715/Put/seqid=0 2024-12-12T16:29:41,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742392_1568 (size=12001) 2024-12-12T16:29:41,335 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:41,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56832 deadline: 1734021041334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:41,339 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:41,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56864 deadline: 1734021041338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:41,563 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/52a33969a9094e9bad075fa208c29051 2024-12-12T16:29:41,567 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/1cda643a71cb447097904fd5ccc4464c as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/1cda643a71cb447097904fd5ccc4464c 2024-12-12T16:29:41,570 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/1cda643a71cb447097904fd5ccc4464c, entries=150, sequenceid=79, filesize=30.2 K 2024-12-12T16:29:41,570 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/5a8615a7661e4470bea02cc74a202bcf as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/5a8615a7661e4470bea02cc74a202bcf 2024-12-12T16:29:41,574 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/5a8615a7661e4470bea02cc74a202bcf, entries=150, sequenceid=79, filesize=11.7 K 2024-12-12T16:29:41,574 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/52a33969a9094e9bad075fa208c29051 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/52a33969a9094e9bad075fa208c29051 2024-12-12T16:29:41,578 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/52a33969a9094e9bad075fa208c29051, entries=150, sequenceid=79, filesize=11.7 K 2024-12-12T16:29:41,579 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 8e89f008dc04dbad786e718ad49c4912 in 863ms, sequenceid=79, compaction requested=false 2024-12-12T16:29:41,579 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8e89f008dc04dbad786e718ad49c4912: 2024-12-12T16:29:41,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-12T16:29:41,671 INFO [Thread-2445 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 161 completed 2024-12-12T16:29:41,672 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T16:29:41,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees 2024-12-12T16:29:41,673 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T16:29:41,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-12T16:29:41,674 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T16:29:41,674 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T16:29:41,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:41,752 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8e89f008dc04dbad786e718ad49c4912 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T16:29:41,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=A 2024-12-12T16:29:41,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:41,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=B 2024-12-12T16:29:41,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:41,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=C 2024-12-12T16:29:41,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, 
new segment=null 2024-12-12T16:29:41,758 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412129f750deb8c2a462aa304e84ecbf71624_8e89f008dc04dbad786e718ad49c4912 is 50, key is test_row_0/A:col10/1734020981750/Put/seqid=0 2024-12-12T16:29:41,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742393_1569 (size=12154) 2024-12-12T16:29:41,774 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:29:41,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-12T16:29:41,779 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412129f750deb8c2a462aa304e84ecbf71624_8e89f008dc04dbad786e718ad49c4912 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412129f750deb8c2a462aa304e84ecbf71624_8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:41,779 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/4b649d74af354b578e17ff30779f8a46, store: [table=TestAcidGuarantees family=A region=8e89f008dc04dbad786e718ad49c4912] 2024-12-12T16:29:41,780 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/4b649d74af354b578e17ff30779f8a46 is 175, key is test_row_0/A:col10/1734020981750/Put/seqid=0 2024-12-12T16:29:41,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742394_1570 (size=30955) 2024-12-12T16:29:41,787 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:41,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56886 deadline: 1734021041785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:41,787 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:41,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1734021041785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:41,789 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:41,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56884 deadline: 1734021041787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:41,826 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:41,826 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-12T16:29:41,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:41,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. as already flushing 2024-12-12T16:29:41,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:41,827 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:41,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:41,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:41,839 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:41,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56832 deadline: 1734021041838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:41,842 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:41,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56864 deadline: 1734021041840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:41,889 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:41,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56886 deadline: 1734021041888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:41,890 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:41,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1734021041888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:41,892 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:41,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56884 deadline: 1734021041890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:41,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-12T16:29:41,979 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:41,979 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-12T16:29:41,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:41,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. as already flushing 2024-12-12T16:29:41,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:41,979 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
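The entries above show the master turning a client flush request into a FlushTableProcedure (pid=163) whose FlushRegionProcedure subtask (pid=164) is repeatedly rejected because the region is already flushing, while the client keeps polling whether the procedure is done. A minimal sketch of how such a table flush is typically requested, assuming the standard HBase 2.x client API (the class name is illustrative and the procedure ids are assigned by the master at runtime):

// Hedged sketch: requests an administrative flush of the test table.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // From the caller's point of view the flush is synchronous: the master runs a
      // FlushTableProcedure and the client waits, polling "is procedure done" in the
      // way the MasterRpcServices(1305) lines above show.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}

As the later re-dispatch entries suggest, the master keeps re-sending the region-level subprocedure until the in-progress memstore flush finishes, at which point the table-level procedure can complete and the client's poll loop returns.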
2024-12-12T16:29:41,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:41,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:42,092 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:42,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56886 deadline: 1734021042090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:42,093 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:42,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1734021042091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:42,094 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:42,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56884 deadline: 1734021042093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:42,132 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:42,132 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-12T16:29:42,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 
2024-12-12T16:29:42,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. as already flushing 2024-12-12T16:29:42,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:42,133 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:42,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:29:42,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
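The "Over memstore limit=512.0 K" warnings come from HRegion.checkResources rejecting writes once the region's memstore exceeds its blocking limit, which is the memstore flush size multiplied by the block multiplier. A hedged configuration sketch follows; the values are examples only, chosen so the product matches the 512 K limit reported in this log, while the actual test settings may differ (stock defaults are 128 MB and 4):

// Hedged sketch: the two settings that determine the blocking memstore limit.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitConfig {
  public static Configuration configure() {
    Configuration conf = HBaseConfiguration.create();
    // Flush a memstore once it reaches this many bytes (example value, not the default).
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
    // Block new updates once the memstore grows to flush.size * this multiplier,
    // i.e. 128 K * 4 = 512 K here, matching the limit in the RegionTooBusyException text.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    return conf;
  }
}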
2024-12-12T16:29:42,186 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=93, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/4b649d74af354b578e17ff30779f8a46 2024-12-12T16:29:42,193 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/5ba1c59dffdc447f97d3b82b0ef68086 is 50, key is test_row_0/B:col10/1734020981750/Put/seqid=0 2024-12-12T16:29:42,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742395_1571 (size=12001) 2024-12-12T16:29:42,197 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/5ba1c59dffdc447f97d3b82b0ef68086 2024-12-12T16:29:42,203 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/dee4f96e33e54cb09ffd20095be0abe2 is 50, key is test_row_0/C:col10/1734020981750/Put/seqid=0 2024-12-12T16:29:42,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742396_1572 (size=12001) 2024-12-12T16:29:42,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-12T16:29:42,285 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:42,285 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-12T16:29:42,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:42,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. as already flushing 2024-12-12T16:29:42,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 
2024-12-12T16:29:42,285 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:42,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:42,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:42,397 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:42,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56886 deadline: 1734021042395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:42,398 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:42,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56884 deadline: 1734021042395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:42,398 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:42,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1734021042395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:42,437 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:42,438 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-12T16:29:42,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:42,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. as already flushing 2024-12-12T16:29:42,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:42,438 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:42,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:42,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:42,590 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:42,591 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-12T16:29:42,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:42,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. as already flushing 2024-12-12T16:29:42,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:42,591 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:42,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:42,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:29:42,607 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/dee4f96e33e54cb09ffd20095be0abe2 2024-12-12T16:29:42,611 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/4b649d74af354b578e17ff30779f8a46 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/4b649d74af354b578e17ff30779f8a46 2024-12-12T16:29:42,613 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/4b649d74af354b578e17ff30779f8a46, entries=150, sequenceid=93, filesize=30.2 K 2024-12-12T16:29:42,614 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/5ba1c59dffdc447f97d3b82b0ef68086 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/5ba1c59dffdc447f97d3b82b0ef68086 2024-12-12T16:29:42,618 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/5ba1c59dffdc447f97d3b82b0ef68086, entries=150, sequenceid=93, filesize=11.7 K 2024-12-12T16:29:42,619 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/dee4f96e33e54cb09ffd20095be0abe2 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/dee4f96e33e54cb09ffd20095be0abe2 2024-12-12T16:29:42,622 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/dee4f96e33e54cb09ffd20095be0abe2, entries=150, sequenceid=93, filesize=11.7 K 2024-12-12T16:29:42,623 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 8e89f008dc04dbad786e718ad49c4912 in 872ms, sequenceid=93, compaction requested=true 2024-12-12T16:29:42,623 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8e89f008dc04dbad786e718ad49c4912: 2024-12-12T16:29:42,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8e89f008dc04dbad786e718ad49c4912:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T16:29:42,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:42,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8e89f008dc04dbad786e718ad49c4912:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T16:29:42,623 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:29:42,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:42,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8e89f008dc04dbad786e718ad49c4912:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T16:29:42,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:29:42,623 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:29:42,624 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92968 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:29:42,624 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:29:42,624 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): 8e89f008dc04dbad786e718ad49c4912/B is initiating minor compaction (all files) 2024-12-12T16:29:42,624 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): 8e89f008dc04dbad786e718ad49c4912/A is initiating minor compaction (all files) 2024-12-12T16:29:42,624 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8e89f008dc04dbad786e718ad49c4912/B in TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:42,624 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8e89f008dc04dbad786e718ad49c4912/A in TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 
2024-12-12T16:29:42,624 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/7537cb8ddc66407181f1c7eca83d7f25, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/1cda643a71cb447097904fd5ccc4464c, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/4b649d74af354b578e17ff30779f8a46] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp, totalSize=90.8 K 2024-12-12T16:29:42,624 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/0f93facd007c4284b26a4eedff2829cc, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/5a8615a7661e4470bea02cc74a202bcf, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/5ba1c59dffdc447f97d3b82b0ef68086] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp, totalSize=35.3 K 2024-12-12T16:29:42,624 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:42,624 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 
files: [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/7537cb8ddc66407181f1c7eca83d7f25, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/1cda643a71cb447097904fd5ccc4464c, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/4b649d74af354b578e17ff30779f8a46] 2024-12-12T16:29:42,625 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 0f93facd007c4284b26a4eedff2829cc, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1734020979736 2024-12-12T16:29:42,625 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7537cb8ddc66407181f1c7eca83d7f25, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1734020979736 2024-12-12T16:29:42,625 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 5a8615a7661e4470bea02cc74a202bcf, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1734020980103 2024-12-12T16:29:42,625 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1cda643a71cb447097904fd5ccc4464c, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1734020980103 2024-12-12T16:29:42,625 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 5ba1c59dffdc447f97d3b82b0ef68086, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1734020980727 2024-12-12T16:29:42,625 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4b649d74af354b578e17ff30779f8a46, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1734020980727 2024-12-12T16:29:42,631 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=8e89f008dc04dbad786e718ad49c4912] 2024-12-12T16:29:42,633 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8e89f008dc04dbad786e718ad49c4912#B#compaction#488 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:29:42,634 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241212c08791145ad7419492834e655cf73fe6_8e89f008dc04dbad786e718ad49c4912 store=[table=TestAcidGuarantees family=A region=8e89f008dc04dbad786e718ad49c4912] 2024-12-12T16:29:42,634 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/263113da39004d05a5d66232eeb271a4 is 50, key is test_row_0/B:col10/1734020981750/Put/seqid=0 2024-12-12T16:29:42,636 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241212c08791145ad7419492834e655cf73fe6_8e89f008dc04dbad786e718ad49c4912, store=[table=TestAcidGuarantees family=A region=8e89f008dc04dbad786e718ad49c4912] 2024-12-12T16:29:42,636 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212c08791145ad7419492834e655cf73fe6_8e89f008dc04dbad786e718ad49c4912 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=8e89f008dc04dbad786e718ad49c4912] 2024-12-12T16:29:42,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742397_1573 (size=12207) 2024-12-12T16:29:42,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742398_1574 (size=4469) 2024-12-12T16:29:42,643 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8e89f008dc04dbad786e718ad49c4912#A#compaction#489 average throughput is 2.04 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:29:42,643 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/1f5cdee28c734e66bc9f5a102ca7f416 is 175, key is test_row_0/A:col10/1734020981750/Put/seqid=0 2024-12-12T16:29:42,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742399_1575 (size=31161) 2024-12-12T16:29:42,651 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/1f5cdee28c734e66bc9f5a102ca7f416 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/1f5cdee28c734e66bc9f5a102ca7f416 2024-12-12T16:29:42,655 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8e89f008dc04dbad786e718ad49c4912/A of 8e89f008dc04dbad786e718ad49c4912 into 1f5cdee28c734e66bc9f5a102ca7f416(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:29:42,655 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8e89f008dc04dbad786e718ad49c4912: 2024-12-12T16:29:42,655 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912., storeName=8e89f008dc04dbad786e718ad49c4912/A, priority=13, startTime=1734020982623; duration=0sec 2024-12-12T16:29:42,655 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:29:42,655 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e89f008dc04dbad786e718ad49c4912:A 2024-12-12T16:29:42,655 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:29:42,656 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:29:42,656 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): 8e89f008dc04dbad786e718ad49c4912/C is initiating minor compaction (all files) 2024-12-12T16:29:42,656 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8e89f008dc04dbad786e718ad49c4912/C in TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 
2024-12-12T16:29:42,656 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/151682da3b014cd5b3f5e8dbca5f8ba1, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/52a33969a9094e9bad075fa208c29051, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/dee4f96e33e54cb09ffd20095be0abe2] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp, totalSize=35.3 K 2024-12-12T16:29:42,657 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 151682da3b014cd5b3f5e8dbca5f8ba1, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1734020979736 2024-12-12T16:29:42,657 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 52a33969a9094e9bad075fa208c29051, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1734020980103 2024-12-12T16:29:42,657 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting dee4f96e33e54cb09ffd20095be0abe2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1734020980727 2024-12-12T16:29:42,664 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8e89f008dc04dbad786e718ad49c4912#C#compaction#490 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:29:42,664 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/4ff5ad155f1240089773022512fe2ef9 is 50, key is test_row_0/C:col10/1734020981750/Put/seqid=0 2024-12-12T16:29:42,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742400_1576 (size=12207) 2024-12-12T16:29:42,673 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/4ff5ad155f1240089773022512fe2ef9 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/4ff5ad155f1240089773022512fe2ef9 2024-12-12T16:29:42,677 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8e89f008dc04dbad786e718ad49c4912/C of 8e89f008dc04dbad786e718ad49c4912 into 4ff5ad155f1240089773022512fe2ef9(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T16:29:42,677 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8e89f008dc04dbad786e718ad49c4912: 2024-12-12T16:29:42,677 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912., storeName=8e89f008dc04dbad786e718ad49c4912/C, priority=13, startTime=1734020982623; duration=0sec 2024-12-12T16:29:42,677 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:42,677 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e89f008dc04dbad786e718ad49c4912:C 2024-12-12T16:29:42,743 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:42,743 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-12T16:29:42,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:42,744 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2837): Flushing 8e89f008dc04dbad786e718ad49c4912 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-12T16:29:42,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=A 2024-12-12T16:29:42,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:42,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=B 2024-12-12T16:29:42,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:42,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=C 2024-12-12T16:29:42,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:42,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212973bf18f655842fba491afe9a69a6da6_8e89f008dc04dbad786e718ad49c4912 is 50, key is test_row_0/A:col10/1734020981785/Put/seqid=0 2024-12-12T16:29:42,755 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742401_1577 (size=12154) 2024-12-12T16:29:42,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-12T16:29:42,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:42,847 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. as already flushing 2024-12-12T16:29:42,864 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:42,864 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:42,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56864 deadline: 1734021042862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:42,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56832 deadline: 1734021042862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:42,903 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:42,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1734021042901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:42,904 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:42,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56886 deadline: 1734021042902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:42,904 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:42,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56884 deadline: 1734021042902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:42,967 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:42,967 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:42,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56832 deadline: 1734021042965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:42,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56864 deadline: 1734021042965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:43,046 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/263113da39004d05a5d66232eeb271a4 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/263113da39004d05a5d66232eeb271a4 2024-12-12T16:29:43,050 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8e89f008dc04dbad786e718ad49c4912/B of 8e89f008dc04dbad786e718ad49c4912 into 263113da39004d05a5d66232eeb271a4(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T16:29:43,050 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8e89f008dc04dbad786e718ad49c4912: 2024-12-12T16:29:43,050 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912., storeName=8e89f008dc04dbad786e718ad49c4912/B, priority=13, startTime=1734020982623; duration=0sec 2024-12-12T16:29:43,050 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:43,050 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e89f008dc04dbad786e718ad49c4912:B 2024-12-12T16:29:43,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:29:43,159 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212973bf18f655842fba491afe9a69a6da6_8e89f008dc04dbad786e718ad49c4912 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212973bf18f655842fba491afe9a69a6da6_8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:43,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/b886420b8fac4b2ba98fdcb648622107, store: [table=TestAcidGuarantees family=A region=8e89f008dc04dbad786e718ad49c4912] 2024-12-12T16:29:43,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/b886420b8fac4b2ba98fdcb648622107 is 175, key is test_row_0/A:col10/1734020981785/Put/seqid=0 2024-12-12T16:29:43,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742402_1578 (size=30955) 2024-12-12T16:29:43,170 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:43,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56864 deadline: 1734021043168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:43,170 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:43,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56832 deadline: 1734021043169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:43,474 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:43,474 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:43,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56832 deadline: 1734021043473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:43,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56864 deadline: 1734021043473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:43,564 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=120, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/b886420b8fac4b2ba98fdcb648622107 2024-12-12T16:29:43,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/e8bd9ba7fc784588aa18fb6bf78ac138 is 50, key is test_row_0/B:col10/1734020981785/Put/seqid=0 2024-12-12T16:29:43,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742403_1579 (size=12001) 2024-12-12T16:29:43,577 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=120 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/e8bd9ba7fc784588aa18fb6bf78ac138 2024-12-12T16:29:43,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/01382bac8ccc47aab08d79016f36feb6 is 50, key is test_row_0/C:col10/1734020981785/Put/seqid=0 2024-12-12T16:29:43,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742404_1580 (size=12001) 2024-12-12T16:29:43,587 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=120 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/01382bac8ccc47aab08d79016f36feb6 2024-12-12T16:29:43,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/b886420b8fac4b2ba98fdcb648622107 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/b886420b8fac4b2ba98fdcb648622107 2024-12-12T16:29:43,594 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/b886420b8fac4b2ba98fdcb648622107, entries=150, sequenceid=120, filesize=30.2 K 2024-12-12T16:29:43,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/e8bd9ba7fc784588aa18fb6bf78ac138 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/e8bd9ba7fc784588aa18fb6bf78ac138 2024-12-12T16:29:43,600 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/e8bd9ba7fc784588aa18fb6bf78ac138, entries=150, sequenceid=120, filesize=11.7 K 2024-12-12T16:29:43,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/01382bac8ccc47aab08d79016f36feb6 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/01382bac8ccc47aab08d79016f36feb6 2024-12-12T16:29:43,605 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/01382bac8ccc47aab08d79016f36feb6, entries=150, sequenceid=120, filesize=11.7 K 2024-12-12T16:29:43,605 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 8e89f008dc04dbad786e718ad49c4912 in 861ms, sequenceid=120, compaction requested=false 2024-12-12T16:29:43,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 
{event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2538): Flush status journal for 8e89f008dc04dbad786e718ad49c4912: 2024-12-12T16:29:43,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:43,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=164 2024-12-12T16:29:43,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4106): Remote procedure done, pid=164 2024-12-12T16:29:43,608 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=164, resume processing ppid=163 2024-12-12T16:29:43,608 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, ppid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9320 sec 2024-12-12T16:29:43,610 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees in 1.9370 sec 2024-12-12T16:29:43,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-12T16:29:43,779 INFO [Thread-2445 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 163 completed 2024-12-12T16:29:43,780 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T16:29:43,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees 2024-12-12T16:29:43,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-12T16:29:43,782 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T16:29:43,783 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T16:29:43,783 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T16:29:43,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-12T16:29:43,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:43,906 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8e89f008dc04dbad786e718ad49c4912 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-12T16:29:43,907 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=A 2024-12-12T16:29:43,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:43,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=B 2024-12-12T16:29:43,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:43,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=C 2024-12-12T16:29:43,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:43,914 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212876076b7e7e04cb7ac15c2987b396cec_8e89f008dc04dbad786e718ad49c4912 is 50, key is test_row_0/A:col10/1734020983905/Put/seqid=0 2024-12-12T16:29:43,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742405_1581 (size=12254) 2024-12-12T16:29:43,932 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:43,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56886 deadline: 1734021043929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:43,933 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:43,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56884 deadline: 1734021043930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:43,933 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:43,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1734021043931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:43,934 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:43,935 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-12T16:29:43,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:43,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. as already flushing 2024-12-12T16:29:43,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:43,935 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
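Note on the repeated RegionTooBusyException entries above: HRegion.checkResources rejects these Mutate calls because the region's memstore has grown past its blocking limit (512.0 K here) while the flush started by MemStoreFlusher.0 is still in progress. That limit is derived from hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the 512 K figure suggests the test runs with a deliberately small flush size so blocking is easy to provoke. A minimal sketch of how such a limit could be configured follows; the actual values used by this test are not shown in this excerpt, so the numbers below are only illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitExample {
  public static void main(String[] args) {
    // Illustrative values only; the test's real configuration is not part of this log.
    Configuration conf = HBaseConfiguration.create();

    // Flush a region's memstore once it reaches 128 KB ...
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // ... and block new writes once it reaches flush.size * multiplier
    // (128 KB * 4 = 512 KB, matching the "Over memstore limit=512.0 K" above).
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Blocking limit in bytes: " + blockingLimit); // 524288
  }
}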
2024-12-12T16:29:43,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:43,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:43,976 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:43,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56864 deadline: 1734021043976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:43,979 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:43,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56832 deadline: 1734021043977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:44,035 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:44,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56884 deadline: 1734021044034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:44,036 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:44,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56886 deadline: 1734021044034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:44,036 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:44,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1734021044034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:44,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-12T16:29:44,086 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:44,086 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-12T16:29:44,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:44,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. as already flushing 2024-12-12T16:29:44,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:44,087 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
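Note on the pid=165/166 entries: the client-requested table flush becomes a FlushTableProcedure (pid=165) on the master, which spawns a FlushRegionProcedure (pid=166) for the region; the region server's FlushRegionCallable refuses it with "Unable to complete flush ... as already flushing" because MemStoreFlusher.0 is already flushing this region, and the master keeps re-dispatching pid=166 until the region can be flushed again. The client side of that loop is a single Admin flush call, roughly as sketched below; the table name is taken from the log, while the connection setup is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Asks the master to flush the table; the master runs a FlushTableProcedure
      // that fans out one FlushRegionProcedure per region, as with pid=165/166 above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}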
2024-12-12T16:29:44,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:44,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:44,239 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:44,239 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:44,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56884 deadline: 1734021044237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:44,239 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-12T16:29:44,239 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:44,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56886 deadline: 1734021044238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:44,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:44,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. as already flushing 2024-12-12T16:29:44,239 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:44,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 
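Note on the CallRunner entries: each one is a client Mutate RPC that was answered with RegionTooBusyException before its deadline. The HBase client retries such calls on its own; the sketch below shows how an application could add its own backoff if the exception still reaches caller code. Table, row, and family names are taken from the log; the retry policy itself is illustrative, not something this test does.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      int attempts = 0;
      while (true) {
        try {
          table.put(put);
          break; // write accepted
        } catch (RegionTooBusyException e) {
          // The region's memstore is over its blocking limit; back off and retry.
          if (++attempts >= 5) {
            throw e;
          }
          Thread.sleep(200L * attempts);
        }
      }
    }
  }
}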
2024-12-12T16:29:44,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1734021044238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:44,239 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:44,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:44,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] 
at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
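Note on the mob.DefaultMobStoreFlusher and HMobStore entries in this log: column family A is MOB-enabled, so its flushed values are written under the mobdir tree and its store files carry larger reference cells (the "Len of the biggest cell" entries report 175 for A's store file versus 50 for B and C), while families B and C flush as ordinary HFiles. A family is typically declared MOB-enabled at table creation, roughly as in the sketch below; the MOB threshold shown is illustrative, since the value used by TestAcidGuarantees does not appear in this excerpt.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilyExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
      // Family A: MOB-enabled, so flushed values land under mobdir/ as seen in the log.
      table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
          .setMobEnabled(true)
          .setMobThreshold(4L) // illustrative threshold in bytes
          .build());
      // Families B and C: ordinary store files.
      table.setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"));
      table.setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"));
      admin.createTable(table.build());
    }
  }
}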
2024-12-12T16:29:44,318 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:29:44,322 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212876076b7e7e04cb7ac15c2987b396cec_8e89f008dc04dbad786e718ad49c4912 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212876076b7e7e04cb7ac15c2987b396cec_8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:44,322 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/e8d15dd370774f928e74d455fb163584, store: [table=TestAcidGuarantees family=A region=8e89f008dc04dbad786e718ad49c4912] 2024-12-12T16:29:44,323 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/e8d15dd370774f928e74d455fb163584 is 175, key is test_row_0/A:col10/1734020983905/Put/seqid=0 2024-12-12T16:29:44,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742406_1582 (size=31055) 2024-12-12T16:29:44,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-12T16:29:44,391 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:44,392 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-12T16:29:44,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:44,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. as already flushing 2024-12-12T16:29:44,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:44,392 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:44,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:44,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:44,542 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:44,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56886 deadline: 1734021044541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:44,542 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:44,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1734021044542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:44,543 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:44,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56884 deadline: 1734021044542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:44,544 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:44,545 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-12T16:29:44,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:44,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. as already flushing 2024-12-12T16:29:44,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:44,545 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:44,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:44,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:44,697 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:44,697 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-12T16:29:44,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:44,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. as already flushing 2024-12-12T16:29:44,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:44,698 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:44,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:44,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:29:44,727 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=133, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/e8d15dd370774f928e74d455fb163584 2024-12-12T16:29:44,734 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/a65c5ed986774393b42df9f7056c7914 is 50, key is test_row_0/B:col10/1734020983905/Put/seqid=0 2024-12-12T16:29:44,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742407_1583 (size=12101) 2024-12-12T16:29:44,850 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:44,850 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-12T16:29:44,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:44,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. as already flushing 2024-12-12T16:29:44,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:44,851 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:29:44,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:44,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:44,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-12T16:29:44,987 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:44,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56864 deadline: 1734021044986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:44,990 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:44,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56832 deadline: 1734021044988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:45,003 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:45,003 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-12T16:29:45,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:45,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. as already flushing 2024-12-12T16:29:45,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:45,004 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:29:45,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:45,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:45,049 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:45,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56886 deadline: 1734021045046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:45,049 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:45,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56884 deadline: 1734021045047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:45,049 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:45,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1734021045047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:45,142 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/a65c5ed986774393b42df9f7056c7914 2024-12-12T16:29:45,148 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/374ea1dd6dfb492f94cca00d88517a7a is 50, key is test_row_0/C:col10/1734020983905/Put/seqid=0 2024-12-12T16:29:45,156 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:45,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742408_1584 (size=12101) 2024-12-12T16:29:45,156 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-12T16:29:45,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:45,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. as already flushing 2024-12-12T16:29:45,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:45,157 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:45,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:45,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:45,309 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:45,310 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-12T16:29:45,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:45,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. as already flushing 2024-12-12T16:29:45,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:45,310 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:45,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:45,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:45,462 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:45,463 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-12T16:29:45,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:45,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. as already flushing 2024-12-12T16:29:45,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:45,463 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:45,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:45,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:45,557 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/374ea1dd6dfb492f94cca00d88517a7a 2024-12-12T16:29:45,560 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/e8d15dd370774f928e74d455fb163584 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/e8d15dd370774f928e74d455fb163584 2024-12-12T16:29:45,563 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/e8d15dd370774f928e74d455fb163584, entries=150, sequenceid=133, filesize=30.3 K 2024-12-12T16:29:45,564 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/a65c5ed986774393b42df9f7056c7914 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/a65c5ed986774393b42df9f7056c7914 2024-12-12T16:29:45,568 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/a65c5ed986774393b42df9f7056c7914, entries=150, 
sequenceid=133, filesize=11.8 K 2024-12-12T16:29:45,568 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/374ea1dd6dfb492f94cca00d88517a7a as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/374ea1dd6dfb492f94cca00d88517a7a 2024-12-12T16:29:45,571 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/374ea1dd6dfb492f94cca00d88517a7a, entries=150, sequenceid=133, filesize=11.8 K 2024-12-12T16:29:45,572 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 8e89f008dc04dbad786e718ad49c4912 in 1666ms, sequenceid=133, compaction requested=true 2024-12-12T16:29:45,572 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8e89f008dc04dbad786e718ad49c4912: 2024-12-12T16:29:45,572 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8e89f008dc04dbad786e718ad49c4912:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T16:29:45,572 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:45,572 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:29:45,572 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:29:45,572 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8e89f008dc04dbad786e718ad49c4912:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T16:29:45,572 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:45,572 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8e89f008dc04dbad786e718ad49c4912:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T16:29:45,572 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:29:45,573 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:29:45,573 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93171 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:29:45,573 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] 
regionserver.HStore(1540): 8e89f008dc04dbad786e718ad49c4912/B is initiating minor compaction (all files) 2024-12-12T16:29:45,573 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): 8e89f008dc04dbad786e718ad49c4912/A is initiating minor compaction (all files) 2024-12-12T16:29:45,573 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8e89f008dc04dbad786e718ad49c4912/B in TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:45,573 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8e89f008dc04dbad786e718ad49c4912/A in TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:45,573 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/263113da39004d05a5d66232eeb271a4, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/e8bd9ba7fc784588aa18fb6bf78ac138, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/a65c5ed986774393b42df9f7056c7914] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp, totalSize=35.5 K 2024-12-12T16:29:45,573 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/1f5cdee28c734e66bc9f5a102ca7f416, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/b886420b8fac4b2ba98fdcb648622107, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/e8d15dd370774f928e74d455fb163584] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp, totalSize=91.0 K 2024-12-12T16:29:45,573 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:45,573 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 
files: [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/1f5cdee28c734e66bc9f5a102ca7f416, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/b886420b8fac4b2ba98fdcb648622107, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/e8d15dd370774f928e74d455fb163584] 2024-12-12T16:29:45,574 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 263113da39004d05a5d66232eeb271a4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1734020980727 2024-12-12T16:29:45,574 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1f5cdee28c734e66bc9f5a102ca7f416, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1734020980727 2024-12-12T16:29:45,574 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting b886420b8fac4b2ba98fdcb648622107, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1734020981783 2024-12-12T16:29:45,574 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting e8bd9ba7fc784588aa18fb6bf78ac138, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1734020981783 2024-12-12T16:29:45,574 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting a65c5ed986774393b42df9f7056c7914, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1734020982853 2024-12-12T16:29:45,574 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting e8d15dd370774f928e74d455fb163584, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1734020982853 2024-12-12T16:29:45,580 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8e89f008dc04dbad786e718ad49c4912#B#compaction#497 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:29:45,580 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/5b8a7328e7b8467fa5fa00f1dd6b92f2 is 50, key is test_row_0/B:col10/1734020983905/Put/seqid=0 2024-12-12T16:29:45,582 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=8e89f008dc04dbad786e718ad49c4912] 2024-12-12T16:29:45,608 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241212833d6999dfe34a1f9c849cf83de7d1b8_8e89f008dc04dbad786e718ad49c4912 store=[table=TestAcidGuarantees family=A region=8e89f008dc04dbad786e718ad49c4912] 2024-12-12T16:29:45,611 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241212833d6999dfe34a1f9c849cf83de7d1b8_8e89f008dc04dbad786e718ad49c4912, store=[table=TestAcidGuarantees family=A region=8e89f008dc04dbad786e718ad49c4912] 2024-12-12T16:29:45,611 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212833d6999dfe34a1f9c849cf83de7d1b8_8e89f008dc04dbad786e718ad49c4912 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=8e89f008dc04dbad786e718ad49c4912] 2024-12-12T16:29:45,616 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:45,616 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-12T16:29:45,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 
2024-12-12T16:29:45,616 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2837): Flushing 8e89f008dc04dbad786e718ad49c4912 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-12T16:29:45,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=A 2024-12-12T16:29:45,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:45,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=B 2024-12-12T16:29:45,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:45,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=C 2024-12-12T16:29:45,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:45,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742409_1585 (size=12409) 2024-12-12T16:29:45,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742410_1586 (size=4469) 2024-12-12T16:29:45,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412124114228b49174eee9ebb8e35b8156e3e_8e89f008dc04dbad786e718ad49c4912 is 50, key is test_row_0/A:col10/1734020983925/Put/seqid=0 2024-12-12T16:29:45,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742411_1587 (size=12304) 2024-12-12T16:29:45,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:29:45,637 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412124114228b49174eee9ebb8e35b8156e3e_8e89f008dc04dbad786e718ad49c4912 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412124114228b49174eee9ebb8e35b8156e3e_8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:45,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, 
pid=166}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/c3b85297d86e44a688e051cb92750084, store: [table=TestAcidGuarantees family=A region=8e89f008dc04dbad786e718ad49c4912] 2024-12-12T16:29:45,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/c3b85297d86e44a688e051cb92750084 is 175, key is test_row_0/A:col10/1734020983925/Put/seqid=0 2024-12-12T16:29:45,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742412_1588 (size=31105) 2024-12-12T16:29:45,646 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=157, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/c3b85297d86e44a688e051cb92750084 2024-12-12T16:29:45,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/35ed9744b44a4213bad32761bf468551 is 50, key is test_row_0/B:col10/1734020983925/Put/seqid=0 2024-12-12T16:29:45,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742413_1589 (size=12151) 2024-12-12T16:29:45,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-12T16:29:46,023 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/5b8a7328e7b8467fa5fa00f1dd6b92f2 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/5b8a7328e7b8467fa5fa00f1dd6b92f2 2024-12-12T16:29:46,025 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8e89f008dc04dbad786e718ad49c4912#A#compaction#498 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:29:46,026 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/74d9010803c143dbbc1ac4d527ed84c5 is 175, key is test_row_0/A:col10/1734020983905/Put/seqid=0 2024-12-12T16:29:46,028 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8e89f008dc04dbad786e718ad49c4912/B of 8e89f008dc04dbad786e718ad49c4912 into 5b8a7328e7b8467fa5fa00f1dd6b92f2(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:29:46,028 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8e89f008dc04dbad786e718ad49c4912: 2024-12-12T16:29:46,028 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912., storeName=8e89f008dc04dbad786e718ad49c4912/B, priority=13, startTime=1734020985572; duration=0sec 2024-12-12T16:29:46,028 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:29:46,028 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e89f008dc04dbad786e718ad49c4912:B 2024-12-12T16:29:46,028 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:29:46,030 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:29:46,030 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): 8e89f008dc04dbad786e718ad49c4912/C is initiating minor compaction (all files) 2024-12-12T16:29:46,030 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8e89f008dc04dbad786e718ad49c4912/C in TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 
2024-12-12T16:29:46,030 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/4ff5ad155f1240089773022512fe2ef9, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/01382bac8ccc47aab08d79016f36feb6, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/374ea1dd6dfb492f94cca00d88517a7a] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp, totalSize=35.5 K 2024-12-12T16:29:46,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742414_1590 (size=31363) 2024-12-12T16:29:46,031 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 4ff5ad155f1240089773022512fe2ef9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1734020980727 2024-12-12T16:29:46,031 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 01382bac8ccc47aab08d79016f36feb6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1734020981783 2024-12-12T16:29:46,031 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 374ea1dd6dfb492f94cca00d88517a7a, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1734020982853 2024-12-12T16:29:46,035 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/74d9010803c143dbbc1ac4d527ed84c5 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/74d9010803c143dbbc1ac4d527ed84c5 2024-12-12T16:29:46,039 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8e89f008dc04dbad786e718ad49c4912/A of 8e89f008dc04dbad786e718ad49c4912 into 74d9010803c143dbbc1ac4d527ed84c5(size=30.6 K), total size for store is 30.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T16:29:46,039 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8e89f008dc04dbad786e718ad49c4912: 2024-12-12T16:29:46,039 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912., storeName=8e89f008dc04dbad786e718ad49c4912/A, priority=13, startTime=1734020985572; duration=0sec 2024-12-12T16:29:46,039 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:46,039 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e89f008dc04dbad786e718ad49c4912:A 2024-12-12T16:29:46,040 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8e89f008dc04dbad786e718ad49c4912#C#compaction#501 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:29:46,040 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/018267f7467f4b86814076139ad04ee6 is 50, key is test_row_0/C:col10/1734020983905/Put/seqid=0 2024-12-12T16:29:46,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742415_1591 (size=12409) 2024-12-12T16:29:46,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:46,052 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. as already flushing 2024-12-12T16:29:46,063 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/35ed9744b44a4213bad32761bf468551 2024-12-12T16:29:46,064 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:46,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56886 deadline: 1734021046061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:46,064 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:46,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56884 deadline: 1734021046062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:46,065 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:46,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1734021046064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:46,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/80b934779eb34133a598dd8d5186f932 is 50, key is test_row_0/C:col10/1734020983925/Put/seqid=0 2024-12-12T16:29:46,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742416_1592 (size=12151) 2024-12-12T16:29:46,167 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:46,167 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:46,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56886 deadline: 1734021046165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:46,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56884 deadline: 1734021046165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:46,169 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:46,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1734021046166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:46,369 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:46,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56886 deadline: 1734021046368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:46,370 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:46,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56884 deadline: 1734021046369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:46,372 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:46,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1734021046370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:46,462 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/018267f7467f4b86814076139ad04ee6 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/018267f7467f4b86814076139ad04ee6 2024-12-12T16:29:46,467 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8e89f008dc04dbad786e718ad49c4912/C of 8e89f008dc04dbad786e718ad49c4912 into 018267f7467f4b86814076139ad04ee6(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T16:29:46,467 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8e89f008dc04dbad786e718ad49c4912: 2024-12-12T16:29:46,467 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912., storeName=8e89f008dc04dbad786e718ad49c4912/C, priority=13, startTime=1734020985572; duration=0sec 2024-12-12T16:29:46,467 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:46,467 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e89f008dc04dbad786e718ad49c4912:C 2024-12-12T16:29:46,473 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/80b934779eb34133a598dd8d5186f932 2024-12-12T16:29:46,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/c3b85297d86e44a688e051cb92750084 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/c3b85297d86e44a688e051cb92750084 2024-12-12T16:29:46,480 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/c3b85297d86e44a688e051cb92750084, entries=150, sequenceid=157, filesize=30.4 K 2024-12-12T16:29:46,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/35ed9744b44a4213bad32761bf468551 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/35ed9744b44a4213bad32761bf468551 2024-12-12T16:29:46,485 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/35ed9744b44a4213bad32761bf468551, entries=150, sequenceid=157, filesize=11.9 K 2024-12-12T16:29:46,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/80b934779eb34133a598dd8d5186f932 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/80b934779eb34133a598dd8d5186f932 2024-12-12T16:29:46,488 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/80b934779eb34133a598dd8d5186f932, entries=150, sequenceid=157, filesize=11.9 K 2024-12-12T16:29:46,489 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 8e89f008dc04dbad786e718ad49c4912 in 873ms, sequenceid=157, compaction requested=false 2024-12-12T16:29:46,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2538): Flush status journal for 8e89f008dc04dbad786e718ad49c4912: 2024-12-12T16:29:46,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:46,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=166 2024-12-12T16:29:46,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4106): Remote procedure done, pid=166 2024-12-12T16:29:46,491 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=166, resume processing ppid=165 2024-12-12T16:29:46,492 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, ppid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.7070 sec 2024-12-12T16:29:46,493 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees in 2.7120 sec 2024-12-12T16:29:46,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:46,673 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8e89f008dc04dbad786e718ad49c4912 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-12T16:29:46,673 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=A 2024-12-12T16:29:46,673 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:46,673 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=B 2024-12-12T16:29:46,673 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:46,673 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
8e89f008dc04dbad786e718ad49c4912, store=C 2024-12-12T16:29:46,673 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:46,680 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412120304b98f4a774b2aa07acc31803eceab_8e89f008dc04dbad786e718ad49c4912 is 50, key is test_row_0/A:col10/1734020986672/Put/seqid=0 2024-12-12T16:29:46,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742417_1593 (size=12304) 2024-12-12T16:29:46,698 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:46,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56886 deadline: 1734021046696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:46,700 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:46,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1734021046697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:46,700 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:46,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56884 deadline: 1734021046697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:46,801 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:46,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56886 deadline: 1734021046800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:46,802 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:46,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1734021046800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:46,802 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
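The repeated RegionTooBusyException warnings in this stretch come from HRegion.checkResources, which rejects incoming writes once the region's combined memstore size passes its blocking threshold; that threshold is the memstore flush size multiplied by the block multiplier. A 512.0 K limit, as logged here, would be consistent with a test-scale flush size of 128 KB and the default multiplier of 4. A minimal sketch of that relationship, assuming illustrative values (the class name and the 128 KB setting are assumptions, not read from this log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Blocking threshold = flush size * block multiplier.
        // 128 KB * 4 = 524288 bytes, i.e. the "Over memstore limit=512.0 K" above
        // (the 128 KB flush size is an assumed test value, not shown in this log).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blocking = conf.getLong("hbase.hregion.memstore.flush.size", 134217728L)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("Blocking memstore size: " + blocking + " bytes");
      }
    }

Writes resume once the flush in progress (MemStoreFlusher.0 above) drains the memstore back under this limit.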
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:46,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56884 deadline: 1734021046801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:47,001 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:47,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56832 deadline: 1734021046999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:47,002 DEBUG [Thread-2435 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4140 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912., hostname=4f6a4780a2f6,41933,1734020809476, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T16:29:47,004 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:47,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56886 deadline: 1734021047003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:47,004 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:47,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1734021047004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:47,007 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
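On the client side these rejections surface through RpcRetryingCallerImpl (the "tries=6, retries=16, started=4140 ms ago" entry above): the AcidGuaranteesTestTool writer thread's HTable.put is retried with backoff until the region accepts the mutation or the retry budget is exhausted. A hedged sketch of that write path against this table; the cell value and the explicit retry setting are chosen for illustration only:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RetriedPutSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.client.retries.number", 16); // matches "retries=16" in the trace
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          // RegionTooBusyException is retried internally with backoff; it only
          // propagates to the caller after the retry budget runs out.
          table.put(put);
        }
      }
    }

This is why the same callIds reappear in the server-side CallRunner entries: each retry arrives as a fresh Mutate RPC against the still-blocked region.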
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:47,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56884 deadline: 1734021047005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:47,008 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:47,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56864 deadline: 1734021047006, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:47,008 DEBUG [Thread-2443 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4147 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912., hostname=4f6a4780a2f6,41933,1734020809476, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T16:29:47,085 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:29:47,088 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412120304b98f4a774b2aa07acc31803eceab_8e89f008dc04dbad786e718ad49c4912 to 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412120304b98f4a774b2aa07acc31803eceab_8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:47,089 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/e8c758571db24b90bd544516b5f64bb8, store: [table=TestAcidGuarantees family=A region=8e89f008dc04dbad786e718ad49c4912] 2024-12-12T16:29:47,089 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/e8c758571db24b90bd544516b5f64bb8 is 175, key is test_row_0/A:col10/1734020986672/Put/seqid=0 2024-12-12T16:29:47,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742418_1594 (size=31105) 2024-12-12T16:29:47,093 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=173, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/e8c758571db24b90bd544516b5f64bb8 2024-12-12T16:29:47,100 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/5c8a2cf94be5477a8abc66d9db0eca8c is 50, key is test_row_0/B:col10/1734020986672/Put/seqid=0 2024-12-12T16:29:47,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742419_1595 (size=12151) 2024-12-12T16:29:47,105 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/5c8a2cf94be5477a8abc66d9db0eca8c 2024-12-12T16:29:47,111 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/3b8b55346e884ca7b3d70653c95d6c2f is 50, key is test_row_0/C:col10/1734020986672/Put/seqid=0 2024-12-12T16:29:47,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742420_1596 (size=12151) 2024-12-12T16:29:47,306 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:47,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56886 deadline: 1734021047305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:47,307 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:47,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1734021047305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:47,309 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
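Column family A in this run is flushed through HMobStore and DefaultMobStoreFlusher (note the mobdir/.tmp file renamed above), which is the MOB write path, while B and C go through the plain DefaultStoreFlusher. A minimal sketch of how such a family is declared, assuming a MOB threshold of 100 bytes purely for illustration (the threshold used by the test is not shown in this log):

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobFamilySketch {
      public static void main(String[] args) {
        // Cells larger than the MOB threshold are written to files under /mobdir,
        // with only a reference cell kept in the regular store file.
        ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("A"))
            .setMobEnabled(true)
            .setMobThreshold(100L)   // assumed value for illustration
            .build();
        System.out.println(cf);
      }
    }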
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:47,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56884 deadline: 1734021047308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:47,528 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/3b8b55346e884ca7b3d70653c95d6c2f 2024-12-12T16:29:47,532 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/e8c758571db24b90bd544516b5f64bb8 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/e8c758571db24b90bd544516b5f64bb8 2024-12-12T16:29:47,535 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/e8c758571db24b90bd544516b5f64bb8, entries=150, sequenceid=173, filesize=30.4 K 2024-12-12T16:29:47,536 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/5c8a2cf94be5477a8abc66d9db0eca8c as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/5c8a2cf94be5477a8abc66d9db0eca8c 2024-12-12T16:29:47,539 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/5c8a2cf94be5477a8abc66d9db0eca8c, entries=150, sequenceid=173, filesize=11.9 K 2024-12-12T16:29:47,540 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/3b8b55346e884ca7b3d70653c95d6c2f as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/3b8b55346e884ca7b3d70653c95d6c2f 2024-12-12T16:29:47,543 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/3b8b55346e884ca7b3d70653c95d6c2f, entries=150, sequenceid=173, filesize=11.9 K 2024-12-12T16:29:47,544 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 8e89f008dc04dbad786e718ad49c4912 in 871ms, sequenceid=173, compaction requested=true 2024-12-12T16:29:47,544 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8e89f008dc04dbad786e718ad49c4912: 2024-12-12T16:29:47,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8e89f008dc04dbad786e718ad49c4912:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T16:29:47,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:47,545 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:29:47,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8e89f008dc04dbad786e718ad49c4912:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T16:29:47,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:47,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8e89f008dc04dbad786e718ad49c4912:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T16:29:47,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:29:47,545 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:29:47,546 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36711 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:29:47,546 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): 8e89f008dc04dbad786e718ad49c4912/B is initiating minor 
compaction (all files) 2024-12-12T16:29:47,546 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8e89f008dc04dbad786e718ad49c4912/B in TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:47,546 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/5b8a7328e7b8467fa5fa00f1dd6b92f2, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/35ed9744b44a4213bad32761bf468551, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/5c8a2cf94be5477a8abc66d9db0eca8c] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp, totalSize=35.9 K 2024-12-12T16:29:47,546 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93573 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:29:47,546 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): 8e89f008dc04dbad786e718ad49c4912/A is initiating minor compaction (all files) 2024-12-12T16:29:47,546 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8e89f008dc04dbad786e718ad49c4912/A in TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:47,546 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/74d9010803c143dbbc1ac4d527ed84c5, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/c3b85297d86e44a688e051cb92750084, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/e8c758571db24b90bd544516b5f64bb8] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp, totalSize=91.4 K 2024-12-12T16:29:47,546 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:47,546 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 
files: [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/74d9010803c143dbbc1ac4d527ed84c5, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/c3b85297d86e44a688e051cb92750084, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/e8c758571db24b90bd544516b5f64bb8] 2024-12-12T16:29:47,547 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 5b8a7328e7b8467fa5fa00f1dd6b92f2, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1734020982853 2024-12-12T16:29:47,547 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 74d9010803c143dbbc1ac4d527ed84c5, keycount=150, bloomtype=ROW, size=30.6 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1734020982853 2024-12-12T16:29:47,547 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 35ed9744b44a4213bad32761bf468551, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1734020983925 2024-12-12T16:29:47,547 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting c3b85297d86e44a688e051cb92750084, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1734020983925 2024-12-12T16:29:47,548 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 5c8a2cf94be5477a8abc66d9db0eca8c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1734020986054 2024-12-12T16:29:47,548 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting e8c758571db24b90bd544516b5f64bb8, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1734020986054 2024-12-12T16:29:47,560 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=8e89f008dc04dbad786e718ad49c4912] 2024-12-12T16:29:47,562 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8e89f008dc04dbad786e718ad49c4912#B#compaction#506 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:29:47,562 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/c7c026f0f39d4902af2107f82b7de86f is 50, key is test_row_0/B:col10/1734020986672/Put/seqid=0 2024-12-12T16:29:47,568 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412129a0c5e5309a94e7f8ee022a9936ba302_8e89f008dc04dbad786e718ad49c4912 store=[table=TestAcidGuarantees family=A region=8e89f008dc04dbad786e718ad49c4912] 2024-12-12T16:29:47,569 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412129a0c5e5309a94e7f8ee022a9936ba302_8e89f008dc04dbad786e718ad49c4912, store=[table=TestAcidGuarantees family=A region=8e89f008dc04dbad786e718ad49c4912] 2024-12-12T16:29:47,569 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412129a0c5e5309a94e7f8ee022a9936ba302_8e89f008dc04dbad786e718ad49c4912 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=8e89f008dc04dbad786e718ad49c4912] 2024-12-12T16:29:47,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742422_1598 (size=4469) 2024-12-12T16:29:47,591 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8e89f008dc04dbad786e718ad49c4912#A#compaction#507 average throughput is 0.84 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:29:47,591 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/5d5cfe87f1ea499d9bfdd84a63f5e53c is 175, key is test_row_0/A:col10/1734020986672/Put/seqid=0 2024-12-12T16:29:47,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742421_1597 (size=12561) 2024-12-12T16:29:47,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742423_1599 (size=31515) 2024-12-12T16:29:47,808 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
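After the flush completed above, each of the three stores held three HFiles, which matches the minimum number of eligible files at which ExploringCompactionPolicy selects them all for a minor compaction, as the "Selecting compaction from 3 store files" entries show. Flushes can also be requested explicitly through the master, which is what the FlushTableProcedure entries further below record. A hedged client-side sketch of both operations (the class name is illustrative, and the calls simply request the work that the region server then logs as above):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushAndCompactSketch {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection connection =
                 ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = connection.getAdmin()) {
          admin.flush(table);   // master-driven flush, like the FlushTableProcedure below
          admin.compact(table); // requests a minor compaction like the one selected above
        }
      }
    }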
2024-12-12T16:29:47,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:47,811 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8e89f008dc04dbad786e718ad49c4912 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-12T16:29:47,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=A 2024-12-12T16:29:47,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:47,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=B 2024-12-12T16:29:47,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:47,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=C 2024-12-12T16:29:47,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:47,819 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412126fb574a4569845ae94a5cc3072c3e38e_8e89f008dc04dbad786e718ad49c4912 is 50, key is test_row_0/A:col10/1734020987811/Put/seqid=0 2024-12-12T16:29:47,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742424_1600 (size=12304) 2024-12-12T16:29:47,828 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:47,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56886 deadline: 1734021047825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:47,829 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:47,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56884 deadline: 1734021047825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:47,829 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:47,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1734021047828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:47,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-12T16:29:47,887 INFO [Thread-2445 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 165 completed 2024-12-12T16:29:47,888 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T16:29:47,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees 2024-12-12T16:29:47,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-12T16:29:47,889 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T16:29:47,890 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T16:29:47,890 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=168, ppid=167, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T16:29:47,932 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:47,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56886 deadline: 1734021047930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:47,932 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:47,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1734021047930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:47,932 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:47,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56884 deadline: 1734021047931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:47,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-12T16:29:48,008 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/c7c026f0f39d4902af2107f82b7de86f as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/c7c026f0f39d4902af2107f82b7de86f 2024-12-12T16:29:48,012 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8e89f008dc04dbad786e718ad49c4912/B of 8e89f008dc04dbad786e718ad49c4912 into c7c026f0f39d4902af2107f82b7de86f(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T16:29:48,012 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8e89f008dc04dbad786e718ad49c4912: 2024-12-12T16:29:48,013 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912., storeName=8e89f008dc04dbad786e718ad49c4912/B, priority=13, startTime=1734020987545; duration=0sec 2024-12-12T16:29:48,013 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:29:48,013 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e89f008dc04dbad786e718ad49c4912:B 2024-12-12T16:29:48,013 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:29:48,013 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/5d5cfe87f1ea499d9bfdd84a63f5e53c as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/5d5cfe87f1ea499d9bfdd84a63f5e53c 2024-12-12T16:29:48,014 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36711 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:29:48,014 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): 8e89f008dc04dbad786e718ad49c4912/C is initiating minor compaction (all files) 2024-12-12T16:29:48,014 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8e89f008dc04dbad786e718ad49c4912/C in TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 
2024-12-12T16:29:48,014 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/018267f7467f4b86814076139ad04ee6, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/80b934779eb34133a598dd8d5186f932, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/3b8b55346e884ca7b3d70653c95d6c2f] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp, totalSize=35.9 K 2024-12-12T16:29:48,015 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 018267f7467f4b86814076139ad04ee6, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1734020982853 2024-12-12T16:29:48,015 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 80b934779eb34133a598dd8d5186f932, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1734020983925 2024-12-12T16:29:48,015 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 3b8b55346e884ca7b3d70653c95d6c2f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1734020986054 2024-12-12T16:29:48,017 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8e89f008dc04dbad786e718ad49c4912/A of 8e89f008dc04dbad786e718ad49c4912 into 5d5cfe87f1ea499d9bfdd84a63f5e53c(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:29:48,017 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8e89f008dc04dbad786e718ad49c4912: 2024-12-12T16:29:48,017 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912., storeName=8e89f008dc04dbad786e718ad49c4912/A, priority=13, startTime=1734020987544; duration=0sec 2024-12-12T16:29:48,017 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:48,017 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e89f008dc04dbad786e718ad49c4912:A 2024-12-12T16:29:48,022 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8e89f008dc04dbad786e718ad49c4912#C#compaction#509 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:29:48,023 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/bab55973620a4fbe86b1729e25768e31 is 50, key is test_row_0/C:col10/1734020986672/Put/seqid=0 2024-12-12T16:29:48,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742425_1601 (size=12561) 2024-12-12T16:29:48,041 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:48,042 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-12T16:29:48,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:48,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. as already flushing 2024-12-12T16:29:48,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:48,042 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:48,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:48,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:48,134 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:48,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1734021048133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:48,134 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:48,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56884 deadline: 1734021048133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:48,135 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:48,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56886 deadline: 1734021048134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:48,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-12T16:29:48,194 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:48,194 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-12T16:29:48,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:48,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. as already flushing 2024-12-12T16:29:48,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:48,195 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:29:48,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:48,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:48,224 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:29:48,228 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412126fb574a4569845ae94a5cc3072c3e38e_8e89f008dc04dbad786e718ad49c4912 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412126fb574a4569845ae94a5cc3072c3e38e_8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:48,229 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/6826f425852a4c63889c84009165580d, store: [table=TestAcidGuarantees family=A region=8e89f008dc04dbad786e718ad49c4912] 2024-12-12T16:29:48,229 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/6826f425852a4c63889c84009165580d is 175, key is test_row_0/A:col10/1734020987811/Put/seqid=0 2024-12-12T16:29:48,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742426_1602 (size=31105) 2024-12-12T16:29:48,347 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:48,347 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-12T16:29:48,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:48,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. as already flushing 2024-12-12T16:29:48,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:48,348 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:48,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:48,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:48,431 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/bab55973620a4fbe86b1729e25768e31 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/bab55973620a4fbe86b1729e25768e31 2024-12-12T16:29:48,435 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8e89f008dc04dbad786e718ad49c4912/C of 8e89f008dc04dbad786e718ad49c4912 into bab55973620a4fbe86b1729e25768e31(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:29:48,435 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8e89f008dc04dbad786e718ad49c4912: 2024-12-12T16:29:48,435 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912., storeName=8e89f008dc04dbad786e718ad49c4912/C, priority=13, startTime=1734020987545; duration=0sec 2024-12-12T16:29:48,435 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:48,435 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e89f008dc04dbad786e718ad49c4912:C 2024-12-12T16:29:48,437 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:48,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56884 deadline: 1734021048436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:48,439 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:48,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1734021048437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:48,439 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:48,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56886 deadline: 1734021048438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:48,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-12T16:29:48,500 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:48,500 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-12T16:29:48,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:48,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. as already flushing 2024-12-12T16:29:48,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:48,500 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:29:48,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:48,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:48,633 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=198, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/6826f425852a4c63889c84009165580d 2024-12-12T16:29:48,640 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/df908cbd137d45f2914985cb2f10006a is 50, key is test_row_0/B:col10/1734020987811/Put/seqid=0 2024-12-12T16:29:48,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742427_1603 (size=12151) 2024-12-12T16:29:48,653 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:48,653 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-12T16:29:48,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:48,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. as already flushing 2024-12-12T16:29:48,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:48,654 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:29:48,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:48,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:48,660 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=198 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/df908cbd137d45f2914985cb2f10006a 2024-12-12T16:29:48,670 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/cc51d57a88c24064ad658584757f91b6 is 50, key is test_row_0/C:col10/1734020987811/Put/seqid=0 2024-12-12T16:29:48,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742428_1604 (size=12151) 2024-12-12T16:29:48,806 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:48,806 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-12T16:29:48,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:48,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. as already flushing 2024-12-12T16:29:48,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:48,806 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:29:48,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:48,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:48,940 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:48,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56884 deadline: 1734021048939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:48,943 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:48,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56886 deadline: 1734021048942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:48,944 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:48,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1734021048942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:48,958 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:48,959 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-12T16:29:48,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 
2024-12-12T16:29:48,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. as already flushing 2024-12-12T16:29:48,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:48,959 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:48,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:29:48,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T16:29:48,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-12T16:29:49,075 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=198 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/cc51d57a88c24064ad658584757f91b6 2024-12-12T16:29:49,079 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/6826f425852a4c63889c84009165580d as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/6826f425852a4c63889c84009165580d 2024-12-12T16:29:49,086 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/6826f425852a4c63889c84009165580d, entries=150, sequenceid=198, filesize=30.4 K 2024-12-12T16:29:49,087 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/df908cbd137d45f2914985cb2f10006a as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/df908cbd137d45f2914985cb2f10006a 2024-12-12T16:29:49,091 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/df908cbd137d45f2914985cb2f10006a, entries=150, sequenceid=198, filesize=11.9 K 2024-12-12T16:29:49,091 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/cc51d57a88c24064ad658584757f91b6 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/cc51d57a88c24064ad658584757f91b6 2024-12-12T16:29:49,094 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/cc51d57a88c24064ad658584757f91b6, entries=150, sequenceid=198, filesize=11.9 K 2024-12-12T16:29:49,095 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 8e89f008dc04dbad786e718ad49c4912 in 1284ms, sequenceid=198, compaction requested=false 2024-12-12T16:29:49,095 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8e89f008dc04dbad786e718ad49c4912: 2024-12-12T16:29:49,111 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:49,111 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-12T16:29:49,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:49,112 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2837): Flushing 8e89f008dc04dbad786e718ad49c4912 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-12T16:29:49,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=A 2024-12-12T16:29:49,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:49,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=B 2024-12-12T16:29:49,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:49,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=C 2024-12-12T16:29:49,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:49,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212b85ff5ba5a7d4ab2894eae08b9ac988d_8e89f008dc04dbad786e718ad49c4912 is 50, key is test_row_0/A:col10/1734020987824/Put/seqid=0 2024-12-12T16:29:49,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742429_1605 (size=12304) 2024-12-12T16:29:49,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:29:49,128 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212b85ff5ba5a7d4ab2894eae08b9ac988d_8e89f008dc04dbad786e718ad49c4912 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212b85ff5ba5a7d4ab2894eae08b9ac988d_8e89f008dc04dbad786e718ad49c4912 
2024-12-12T16:29:49,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/ae94fbcf9b2847ed898777114040c9e0, store: [table=TestAcidGuarantees family=A region=8e89f008dc04dbad786e718ad49c4912] 2024-12-12T16:29:49,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/ae94fbcf9b2847ed898777114040c9e0 is 175, key is test_row_0/A:col10/1734020987824/Put/seqid=0 2024-12-12T16:29:49,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742430_1606 (size=31105) 2024-12-12T16:29:49,534 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=213, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/ae94fbcf9b2847ed898777114040c9e0 2024-12-12T16:29:49,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/16c8847be2a6448e90c9dccc4425b299 is 50, key is test_row_0/B:col10/1734020987824/Put/seqid=0 2024-12-12T16:29:49,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742431_1607 (size=12151) 2024-12-12T16:29:49,946 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 
as already flushing 2024-12-12T16:29:49,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:49,946 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/16c8847be2a6448e90c9dccc4425b299 2024-12-12T16:29:49,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/01a7d3b3eb1d494a8fca6282d14fe067 is 50, key is test_row_0/C:col10/1734020987824/Put/seqid=0 2024-12-12T16:29:49,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742432_1608 (size=12151) 2024-12-12T16:29:49,976 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:49,976 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:49,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56884 deadline: 1734021049975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:49,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56886 deadline: 1734021049974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:49,976 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:49,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1734021049975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:49,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-12T16:29:50,080 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:50,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56886 deadline: 1734021050077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:50,080 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:50,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56884 deadline: 1734021050077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:50,081 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:50,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1734021050077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:50,283 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:50,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56884 deadline: 1734021050281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:50,283 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:50,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1734021050281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:50,283 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:50,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56886 deadline: 1734021050282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:50,357 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/01a7d3b3eb1d494a8fca6282d14fe067 2024-12-12T16:29:50,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/ae94fbcf9b2847ed898777114040c9e0 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/ae94fbcf9b2847ed898777114040c9e0 2024-12-12T16:29:50,364 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/ae94fbcf9b2847ed898777114040c9e0, entries=150, sequenceid=213, filesize=30.4 K 2024-12-12T16:29:50,365 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/16c8847be2a6448e90c9dccc4425b299 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/16c8847be2a6448e90c9dccc4425b299 2024-12-12T16:29:50,368 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/16c8847be2a6448e90c9dccc4425b299, entries=150, sequenceid=213, filesize=11.9 K 2024-12-12T16:29:50,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/01a7d3b3eb1d494a8fca6282d14fe067 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/01a7d3b3eb1d494a8fca6282d14fe067 2024-12-12T16:29:50,372 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/01a7d3b3eb1d494a8fca6282d14fe067, entries=150, sequenceid=213, filesize=11.9 K 2024-12-12T16:29:50,373 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 8e89f008dc04dbad786e718ad49c4912 in 1261ms, sequenceid=213, compaction requested=true 2024-12-12T16:29:50,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2538): Flush status journal for 8e89f008dc04dbad786e718ad49c4912: 2024-12-12T16:29:50,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:50,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=168 2024-12-12T16:29:50,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4106): Remote procedure done, pid=168 2024-12-12T16:29:50,375 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=168, resume processing ppid=167 2024-12-12T16:29:50,375 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=168, ppid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.4840 sec 2024-12-12T16:29:50,376 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees in 2.4870 sec 2024-12-12T16:29:50,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:50,587 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8e89f008dc04dbad786e718ad49c4912 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-12T16:29:50,588 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=A 2024-12-12T16:29:50,588 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:50,589 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=B 2024-12-12T16:29:50,589 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:50,589 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
8e89f008dc04dbad786e718ad49c4912, store=C 2024-12-12T16:29:50,589 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:50,595 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412120c545e92852546a19c9a7c540dac65be_8e89f008dc04dbad786e718ad49c4912 is 50, key is test_row_0/A:col10/1734020989973/Put/seqid=0 2024-12-12T16:29:50,598 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:50,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56886 deadline: 1734021050596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:50,598 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:50,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56884 deadline: 1734021050597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:50,599 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:50,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1734021050598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:50,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742433_1609 (size=14794) 2024-12-12T16:29:50,701 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:50,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56886 deadline: 1734021050699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:50,702 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:50,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56884 deadline: 1734021050699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:50,702 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:50,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1734021050700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:50,904 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:50,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56886 deadline: 1734021050902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:50,904 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:50,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56884 deadline: 1734021050903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:50,905 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:50,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1734021050903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:51,006 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:29:51,009 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412120c545e92852546a19c9a7c540dac65be_8e89f008dc04dbad786e718ad49c4912 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412120c545e92852546a19c9a7c540dac65be_8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:51,010 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/0101e4f6bec44d5383449c83948809a2, store: [table=TestAcidGuarantees family=A region=8e89f008dc04dbad786e718ad49c4912] 2024-12-12T16:29:51,011 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/0101e4f6bec44d5383449c83948809a2 is 175, key is test_row_0/A:col10/1734020989973/Put/seqid=0 2024-12-12T16:29:51,011 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:51,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56864 deadline: 1734021051010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:51,011 DEBUG [Thread-2443 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8150 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912., hostname=4f6a4780a2f6,41933,1734020809476, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T16:29:51,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742434_1610 (size=39749) 2024-12-12T16:29:51,015 INFO [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=239, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/0101e4f6bec44d5383449c83948809a2 2024-12-12T16:29:51,021 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/6c0b529732284234bc8883b84084db6b is 50, key is test_row_0/B:col10/1734020989973/Put/seqid=0 2024-12-12T16:29:51,023 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:51,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56832 deadline: 1734021051021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:51,024 DEBUG [Thread-2435 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8163 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at 
org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912., hostname=4f6a4780a2f6,41933,1734020809476, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at 
org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T16:29:51,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742435_1611 (size=12151) 2024-12-12T16:29:51,025 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=239 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/6c0b529732284234bc8883b84084db6b 2024-12-12T16:29:51,030 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/88df406208f24090a14f19f7a240d3fb is 50, key is test_row_0/C:col10/1734020989973/Put/seqid=0 2024-12-12T16:29:51,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742436_1612 (size=12151) 2024-12-12T16:29:51,034 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=239 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/88df406208f24090a14f19f7a240d3fb 2024-12-12T16:29:51,038 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/0101e4f6bec44d5383449c83948809a2 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/0101e4f6bec44d5383449c83948809a2 2024-12-12T16:29:51,041 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/0101e4f6bec44d5383449c83948809a2, entries=200, sequenceid=239, filesize=38.8 K 2024-12-12T16:29:51,042 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/6c0b529732284234bc8883b84084db6b as 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/6c0b529732284234bc8883b84084db6b 2024-12-12T16:29:51,045 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/6c0b529732284234bc8883b84084db6b, entries=150, sequenceid=239, filesize=11.9 K 2024-12-12T16:29:51,046 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/88df406208f24090a14f19f7a240d3fb as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/88df406208f24090a14f19f7a240d3fb 2024-12-12T16:29:51,049 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/88df406208f24090a14f19f7a240d3fb, entries=150, sequenceid=239, filesize=11.9 K 2024-12-12T16:29:51,050 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 8e89f008dc04dbad786e718ad49c4912 in 463ms, sequenceid=239, compaction requested=true 2024-12-12T16:29:51,050 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8e89f008dc04dbad786e718ad49c4912: 2024-12-12T16:29:51,050 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8e89f008dc04dbad786e718ad49c4912:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T16:29:51,050 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:51,050 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8e89f008dc04dbad786e718ad49c4912:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T16:29:51,050 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T16:29:51,050 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:51,050 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T16:29:51,050 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8e89f008dc04dbad786e718ad49c4912:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T16:29:51,050 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:29:51,053 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction 
algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T16:29:51,053 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 133474 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T16:29:51,053 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): 8e89f008dc04dbad786e718ad49c4912/B is initiating minor compaction (all files) 2024-12-12T16:29:51,053 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): 8e89f008dc04dbad786e718ad49c4912/A is initiating minor compaction (all files) 2024-12-12T16:29:51,053 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8e89f008dc04dbad786e718ad49c4912/B in TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:51,053 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8e89f008dc04dbad786e718ad49c4912/A in TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:51,053 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/c7c026f0f39d4902af2107f82b7de86f, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/df908cbd137d45f2914985cb2f10006a, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/16c8847be2a6448e90c9dccc4425b299, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/6c0b529732284234bc8883b84084db6b] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp, totalSize=47.9 K 2024-12-12T16:29:51,053 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/5d5cfe87f1ea499d9bfdd84a63f5e53c, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/6826f425852a4c63889c84009165580d, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/ae94fbcf9b2847ed898777114040c9e0, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/0101e4f6bec44d5383449c83948809a2] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp, totalSize=130.3 K 2024-12-12T16:29:51,053 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 
MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:51,053 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. files: [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/5d5cfe87f1ea499d9bfdd84a63f5e53c, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/6826f425852a4c63889c84009165580d, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/ae94fbcf9b2847ed898777114040c9e0, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/0101e4f6bec44d5383449c83948809a2] 2024-12-12T16:29:51,056 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting c7c026f0f39d4902af2107f82b7de86f, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1734020986054 2024-12-12T16:29:51,056 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting df908cbd137d45f2914985cb2f10006a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1734020986696 2024-12-12T16:29:51,057 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 16c8847be2a6448e90c9dccc4425b299, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1734020987820 2024-12-12T16:29:51,057 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 6c0b529732284234bc8883b84084db6b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1734020989973 2024-12-12T16:29:51,061 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5d5cfe87f1ea499d9bfdd84a63f5e53c, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1734020986054 2024-12-12T16:29:51,062 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6826f425852a4c63889c84009165580d, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1734020986696 2024-12-12T16:29:51,062 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting ae94fbcf9b2847ed898777114040c9e0, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1734020987820 2024-12-12T16:29:51,062 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0101e4f6bec44d5383449c83948809a2, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1734020989973 2024-12-12T16:29:51,070 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8e89f008dc04dbad786e718ad49c4912#B#compaction#518 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:29:51,071 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/cae37e331f124bb686c022dabd68a526 is 50, key is test_row_0/B:col10/1734020989973/Put/seqid=0 2024-12-12T16:29:51,073 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=8e89f008dc04dbad786e718ad49c4912] 2024-12-12T16:29:51,084 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241212bb7b2e30cc4d4c2abee101859f6f9730_8e89f008dc04dbad786e718ad49c4912 store=[table=TestAcidGuarantees family=A region=8e89f008dc04dbad786e718ad49c4912] 2024-12-12T16:29:51,087 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241212bb7b2e30cc4d4c2abee101859f6f9730_8e89f008dc04dbad786e718ad49c4912, store=[table=TestAcidGuarantees family=A region=8e89f008dc04dbad786e718ad49c4912] 2024-12-12T16:29:51,087 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212bb7b2e30cc4d4c2abee101859f6f9730_8e89f008dc04dbad786e718ad49c4912 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=8e89f008dc04dbad786e718ad49c4912] 2024-12-12T16:29:51,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742437_1613 (size=12697) 2024-12-12T16:29:51,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742438_1614 (size=4469) 2024-12-12T16:29:51,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:51,209 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8e89f008dc04dbad786e718ad49c4912 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T16:29:51,209 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=A 2024-12-12T16:29:51,210 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:51,210 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=B 2024-12-12T16:29:51,210 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:51,210 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=C 2024-12-12T16:29:51,210 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:51,219 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the 
biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121249ecd3516a204b589db131139d6e942e_8e89f008dc04dbad786e718ad49c4912 is 50, key is test_row_0/A:col10/1734020991208/Put/seqid=0 2024-12-12T16:29:51,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742439_1615 (size=12304) 2024-12-12T16:29:51,226 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:29:51,229 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121249ecd3516a204b589db131139d6e942e_8e89f008dc04dbad786e718ad49c4912 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121249ecd3516a204b589db131139d6e942e_8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:51,230 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/82242a442ca448c3ae161a3aadaeffb6, store: [table=TestAcidGuarantees family=A region=8e89f008dc04dbad786e718ad49c4912] 2024-12-12T16:29:51,231 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/82242a442ca448c3ae161a3aadaeffb6 is 175, key is test_row_0/A:col10/1734020991208/Put/seqid=0 2024-12-12T16:29:51,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742440_1616 (size=31105) 2024-12-12T16:29:51,235 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=252, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/82242a442ca448c3ae161a3aadaeffb6 2024-12-12T16:29:51,241 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/cd06d18fbe2648cca6ac7fe960a70ea2 is 50, key is test_row_0/B:col10/1734020991208/Put/seqid=0 2024-12-12T16:29:51,243 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:51,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56884 deadline: 1734021051239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:51,243 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:51,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1734021051240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:51,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742441_1617 (size=12151) 2024-12-12T16:29:51,245 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:51,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56886 deadline: 1734021051241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:51,347 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:51,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56884 deadline: 1734021051344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:51,347 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:51,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1734021051344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:51,348 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:51,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56886 deadline: 1734021051346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:51,495 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/cae37e331f124bb686c022dabd68a526 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/cae37e331f124bb686c022dabd68a526 2024-12-12T16:29:51,495 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8e89f008dc04dbad786e718ad49c4912#A#compaction#519 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:29:51,496 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/1e8a462c57b1439fbc9e5555c161d3e6 is 175, key is test_row_0/A:col10/1734020989973/Put/seqid=0 2024-12-12T16:29:51,501 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 8e89f008dc04dbad786e718ad49c4912/B of 8e89f008dc04dbad786e718ad49c4912 into cae37e331f124bb686c022dabd68a526(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
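The repeated RegionTooBusyException warnings above mean the region's memstore has crossed its blocking limit (512.0 K in this test run; in HBase that limit is normally derived from hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier) and mutations are rejected until the in-flight flush drains the memstore. Below is a minimal client-side sketch of how a writer might back off and retry against this table; the value written and the retry/backoff numbers are illustrative assumptions, not part of the test.

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      long backoffMs = 100L;
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);
          break; // write accepted once the flush has drained the memstore
        } catch (IOException e) {
          // Depending on client retry settings the busy-region error may arrive
          // wrapped (e.g. in a retries-exhausted exception) rather than as a bare
          // RegionTooBusyException, so back off on any IOException here.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}
```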
2024-12-12T16:29:51,501 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8e89f008dc04dbad786e718ad49c4912: 2024-12-12T16:29:51,501 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912., storeName=8e89f008dc04dbad786e718ad49c4912/B, priority=12, startTime=1734020991050; duration=0sec 2024-12-12T16:29:51,501 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:29:51,501 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e89f008dc04dbad786e718ad49c4912:B 2024-12-12T16:29:51,502 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T16:29:51,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742442_1618 (size=31651) 2024-12-12T16:29:51,503 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T16:29:51,503 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): 8e89f008dc04dbad786e718ad49c4912/C is initiating minor compaction (all files) 2024-12-12T16:29:51,503 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8e89f008dc04dbad786e718ad49c4912/C in TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 
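The SortedCompactionPolicy / ExploringCompactionPolicy entries above show the C store selecting all 4 eligible files for a minor compaction, with 16 files being the blocking threshold. The sketch below documents the configuration knobs that drive this selection, assuming the standard property names; the values are illustrative, not the ones used by this test, and in a real deployment they live in hbase-site.xml rather than client code.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuning {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Minimum / maximum number of store files considered for one minor compaction.
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);

    // Ratio used by ExploringCompactionPolicy when weighing candidate file sets.
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);

    // Once a store reaches this many files, further flushes are delayed
    // ("16 blocking" in the log entries above).
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);

    System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
  }
}
```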
2024-12-12T16:29:51,503 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/bab55973620a4fbe86b1729e25768e31, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/cc51d57a88c24064ad658584757f91b6, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/01a7d3b3eb1d494a8fca6282d14fe067, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/88df406208f24090a14f19f7a240d3fb] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp, totalSize=47.9 K 2024-12-12T16:29:51,503 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting bab55973620a4fbe86b1729e25768e31, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1734020986054 2024-12-12T16:29:51,504 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting cc51d57a88c24064ad658584757f91b6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1734020986696 2024-12-12T16:29:51,505 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 01a7d3b3eb1d494a8fca6282d14fe067, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1734020987820 2024-12-12T16:29:51,505 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 88df406208f24090a14f19f7a240d3fb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1734020989973 2024-12-12T16:29:51,508 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/1e8a462c57b1439fbc9e5555c161d3e6 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/1e8a462c57b1439fbc9e5555c161d3e6 2024-12-12T16:29:51,515 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 8e89f008dc04dbad786e718ad49c4912/A of 8e89f008dc04dbad786e718ad49c4912 into 1e8a462c57b1439fbc9e5555c161d3e6(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
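Column family A in this table is MOB-enabled, which is why its flushes and compactions earlier in this run go through DefaultMobStoreFlusher / DefaultMobStoreCompactor and produce files under mobdir, while B and C are plain stores. The sketch below shows how such a family can be declared with the HBase 2.x descriptor builders; the table name and the MOB threshold are hypothetical, chosen only for illustration.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Family A stores values larger than the threshold as MOB files under /mobdir.
      ColumnFamilyDescriptor mobFamily = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes("A"))
          .setMobEnabled(true)
          .setMobThreshold(100L)   // bytes; illustrative value only
          .build();
      // B and C are ordinary (non-MOB) stores.
      TableDescriptor table = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestAcidGuaranteesMobDemo")) // hypothetical name
          .setColumnFamily(mobFamily)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"))
          .build();
      admin.createTable(table);
    }
  }
}
```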
2024-12-12T16:29:51,515 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8e89f008dc04dbad786e718ad49c4912: 2024-12-12T16:29:51,515 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912., storeName=8e89f008dc04dbad786e718ad49c4912/A, priority=12, startTime=1734020991050; duration=0sec 2024-12-12T16:29:51,515 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:51,515 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e89f008dc04dbad786e718ad49c4912:A 2024-12-12T16:29:51,516 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8e89f008dc04dbad786e718ad49c4912#C#compaction#522 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:29:51,517 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/863fdf10abca478788a7dfb9d91e3572 is 50, key is test_row_0/C:col10/1734020989973/Put/seqid=0 2024-12-12T16:29:51,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742443_1619 (size=12697) 2024-12-12T16:29:51,551 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:51,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56884 deadline: 1734021051549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:51,551 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:51,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1734021051549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:51,551 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:51,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56886 deadline: 1734021051550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:51,645 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/cd06d18fbe2648cca6ac7fe960a70ea2 2024-12-12T16:29:51,655 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/ee3593518ab24d149aae175f3a38e7c3 is 50, key is test_row_0/C:col10/1734020991208/Put/seqid=0 2024-12-12T16:29:51,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742444_1620 (size=12151) 2024-12-12T16:29:51,854 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:51,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56884 deadline: 1734021051853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:51,855 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:51,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1734021051854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:51,855 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:51,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56886 deadline: 1734021051854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:51,937 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/863fdf10abca478788a7dfb9d91e3572 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/863fdf10abca478788a7dfb9d91e3572 2024-12-12T16:29:51,941 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 8e89f008dc04dbad786e718ad49c4912/C of 8e89f008dc04dbad786e718ad49c4912 into 863fdf10abca478788a7dfb9d91e3572(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
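The PressureAwareThroughputController lines for the B, A and C compactions each report "total limit is 50.00 MB/second": compaction I/O on this regionserver is throttled, and the effective limit floats with memstore and compaction pressure. The sketch below documents the throttling knobs, assuming the property names used by the pressure-aware controller in recent HBase releases (they may differ by version); the bounds chosen are illustrative, and these are server-side settings shown in code only for readability.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThrottling {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Controller implementation; the log above shows a pressure-aware controller in use.
    conf.set("hbase.regionserver.throughput.controller",
        "org.apache.hadoop.hbase.regionserver.throttle.PressureAwareCompactionThroughputController");

    // The effective compaction throughput limit floats between these bounds
    // depending on flush/compaction pressure.
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);

    System.out.println(conf.get("hbase.regionserver.throughput.controller"));
  }
}
```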
2024-12-12T16:29:51,941 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8e89f008dc04dbad786e718ad49c4912: 2024-12-12T16:29:51,941 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912., storeName=8e89f008dc04dbad786e718ad49c4912/C, priority=12, startTime=1734020991050; duration=0sec 2024-12-12T16:29:51,941 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:51,941 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e89f008dc04dbad786e718ad49c4912:C 2024-12-12T16:29:51,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-12T16:29:51,994 INFO [Thread-2445 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 167 completed 2024-12-12T16:29:51,996 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T16:29:51,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=169, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees 2024-12-12T16:29:51,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-12-12T16:29:51,997 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=169, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T16:29:51,998 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=169, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T16:29:51,998 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T16:29:52,078 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/ee3593518ab24d149aae175f3a38e7c3 2024-12-12T16:29:52,082 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/82242a442ca448c3ae161a3aadaeffb6 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/82242a442ca448c3ae161a3aadaeffb6 2024-12-12T16:29:52,085 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/82242a442ca448c3ae161a3aadaeffb6, entries=150, sequenceid=252, filesize=30.4 K 2024-12-12T16:29:52,086 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/cd06d18fbe2648cca6ac7fe960a70ea2 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/cd06d18fbe2648cca6ac7fe960a70ea2 2024-12-12T16:29:52,089 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/cd06d18fbe2648cca6ac7fe960a70ea2, entries=150, sequenceid=252, filesize=11.9 K 2024-12-12T16:29:52,090 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/ee3593518ab24d149aae175f3a38e7c3 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/ee3593518ab24d149aae175f3a38e7c3 2024-12-12T16:29:52,093 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/ee3593518ab24d149aae175f3a38e7c3, entries=150, sequenceid=252, filesize=11.9 K 2024-12-12T16:29:52,094 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 8e89f008dc04dbad786e718ad49c4912 in 886ms, sequenceid=252, compaction requested=false 2024-12-12T16:29:52,094 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8e89f008dc04dbad786e718ad49c4912: 2024-12-12T16:29:52,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-12-12T16:29:52,150 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:52,151 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-12-12T16:29:52,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 
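The FlushTableProcedure (pid=169) and the FlushRegionProcedure it spawns (pid=170) are the master-side handling of an explicit flush request, the same kind the test client just completed ("Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 167 completed"). A minimal sketch of issuing that request from a client via the standard Admin API follows.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; in this 2.7.0-SNAPSHOT
      // build the master runs it as a FlushTableProcedure with one
      // FlushRegionProcedure per region, matching the pid=169/pid=170 entries above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```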
2024-12-12T16:29:52,151 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2837): Flushing 8e89f008dc04dbad786e718ad49c4912 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-12T16:29:52,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=A 2024-12-12T16:29:52,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:52,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=B 2024-12-12T16:29:52,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:52,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=C 2024-12-12T16:29:52,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:52,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412126ae32fb6d53d446f8e25c2d476503225_8e89f008dc04dbad786e718ad49c4912 is 50, key is test_row_0/A:col10/1734020991240/Put/seqid=0 2024-12-12T16:29:52,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742445_1621 (size=12454) 2024-12-12T16:29:52,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-12-12T16:29:52,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:52,359 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. as already flushing 2024-12-12T16:29:52,369 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:52,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1734021052366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:52,370 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:52,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56886 deadline: 1734021052368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:52,371 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:52,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56884 deadline: 1734021052369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:52,472 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:52,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1734021052470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:52,472 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:52,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56886 deadline: 1734021052471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:52,472 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:52,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56884 deadline: 1734021052472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:52,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:29:52,565 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412126ae32fb6d53d446f8e25c2d476503225_8e89f008dc04dbad786e718ad49c4912 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412126ae32fb6d53d446f8e25c2d476503225_8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:52,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/3c24002ff14d4cf6babe7fb6573798ca, store: [table=TestAcidGuarantees family=A region=8e89f008dc04dbad786e718ad49c4912] 2024-12-12T16:29:52,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/3c24002ff14d4cf6babe7fb6573798ca is 175, key is test_row_0/A:col10/1734020991240/Put/seqid=0 2024-12-12T16:29:52,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742446_1622 (size=31255) 2024-12-12T16:29:52,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-12-12T16:29:52,675 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:52,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56886 deadline: 1734021052674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:52,678 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:52,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1734021052676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:52,679 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:52,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56884 deadline: 1734021052678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:52,970 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=278, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/3c24002ff14d4cf6babe7fb6573798ca 2024-12-12T16:29:52,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/10bcae9a186246de8b14d1f027fa8789 is 50, key is test_row_0/B:col10/1734020991240/Put/seqid=0 2024-12-12T16:29:52,978 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:52,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56886 deadline: 1734021052977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:52,982 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:52,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1734021052979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:52,982 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:52,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56884 deadline: 1734021052981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:52,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742447_1623 (size=12301) 2024-12-12T16:29:53,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-12-12T16:29:53,385 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/10bcae9a186246de8b14d1f027fa8789 2024-12-12T16:29:53,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/6450c807d5274138a897697ec5b6d47e is 50, key is test_row_0/C:col10/1734020991240/Put/seqid=0 2024-12-12T16:29:53,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742448_1624 (size=12301) 2024-12-12T16:29:53,395 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/6450c807d5274138a897697ec5b6d47e 2024-12-12T16:29:53,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/3c24002ff14d4cf6babe7fb6573798ca as 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/3c24002ff14d4cf6babe7fb6573798ca 2024-12-12T16:29:53,403 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/3c24002ff14d4cf6babe7fb6573798ca, entries=150, sequenceid=278, filesize=30.5 K 2024-12-12T16:29:53,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/10bcae9a186246de8b14d1f027fa8789 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/10bcae9a186246de8b14d1f027fa8789 2024-12-12T16:29:53,408 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/10bcae9a186246de8b14d1f027fa8789, entries=150, sequenceid=278, filesize=12.0 K 2024-12-12T16:29:53,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/6450c807d5274138a897697ec5b6d47e as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/6450c807d5274138a897697ec5b6d47e 2024-12-12T16:29:53,412 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/6450c807d5274138a897697ec5b6d47e, entries=150, sequenceid=278, filesize=12.0 K 2024-12-12T16:29:53,412 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 8e89f008dc04dbad786e718ad49c4912 in 1261ms, sequenceid=278, compaction requested=true 2024-12-12T16:29:53,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2538): Flush status journal for 8e89f008dc04dbad786e718ad49c4912: 2024-12-12T16:29:53,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 
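[Aside, not part of the log output] The repeated "RegionTooBusyException: Over memstore limit=512.0 K" warnings above are HBase's memstore back-pressure: mutations against the region are rejected while its memstore sits above the blocking limit, which is the per-region flush size multiplied by a block multiplier; the flush that just completed (dataSize ~134.18 KB, sequenceid=278) is what eventually drains it. The exact configuration this test run uses is not visible in this excerpt, so the sketch below only illustrates, under assumed values, how the two standard settings combine into a 512 K blocking limit.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative sketch only: the knobs that determine the "Over memstore limit" threshold.
// The 128 K / 4 split is an assumption chosen so the product matches the 512 K limit in the log.
public class MemStoreLimitExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // assumed per-region flush threshold: 128 K
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // assumed multiplier: 4

    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
    // Writes are rejected with RegionTooBusyException once the region memstore exceeds this size.
    System.out.println("Blocking memstore limit: " + blockingLimit + " bytes (512 K with these assumed values)");
  }
}
```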
2024-12-12T16:29:53,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=170 2024-12-12T16:29:53,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4106): Remote procedure done, pid=170 2024-12-12T16:29:53,415 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=170, resume processing ppid=169 2024-12-12T16:29:53,415 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=170, ppid=169, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4160 sec 2024-12-12T16:29:53,416 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=169, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees in 1.4190 sec 2024-12-12T16:29:53,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:53,483 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8e89f008dc04dbad786e718ad49c4912 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-12T16:29:53,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=A 2024-12-12T16:29:53,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:53,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=B 2024-12-12T16:29:53,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:53,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=C 2024-12-12T16:29:53,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:53,490 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412127f536cda3b9c466fae559322d0f9d52e_8e89f008dc04dbad786e718ad49c4912 is 50, key is test_row_0/A:col10/1734020992358/Put/seqid=0 2024-12-12T16:29:53,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742449_1625 (size=14994) 2024-12-12T16:29:53,511 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:53,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56884 deadline: 1734021053508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:53,514 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:53,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1734021053511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:53,514 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:53,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56886 deadline: 1734021053511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:53,614 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:53,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56884 deadline: 1734021053612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:53,618 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:53,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1734021053615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:53,618 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:53,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56886 deadline: 1734021053615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:53,818 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:53,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56884 deadline: 1734021053817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:53,820 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:53,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56886 deadline: 1734021053819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:53,821 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:53,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1734021053820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:53,894 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:29:53,897 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412127f536cda3b9c466fae559322d0f9d52e_8e89f008dc04dbad786e718ad49c4912 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412127f536cda3b9c466fae559322d0f9d52e_8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:53,898 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/26971657f3a2439aa2b6198169c470a8, store: [table=TestAcidGuarantees family=A region=8e89f008dc04dbad786e718ad49c4912] 2024-12-12T16:29:53,899 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/26971657f3a2439aa2b6198169c470a8 is 175, key is test_row_0/A:col10/1734020992358/Put/seqid=0 2024-12-12T16:29:53,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742450_1626 (size=39949) 2024-12-12T16:29:54,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-12-12T16:29:54,101 INFO [Thread-2445 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 169 completed 2024-12-12T16:29:54,102 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T16:29:54,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=171, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees 2024-12-12T16:29:54,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-12T16:29:54,104 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=171, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T16:29:54,104 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=171, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T16:29:54,104 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=172, ppid=171, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T16:29:54,123 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:54,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56884 deadline: 1734021054121, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:54,124 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:54,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56886 deadline: 1734021054123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:54,124 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:54,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1734021054123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:54,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-12T16:29:54,256 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:54,256 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-12-12T16:29:54,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:54,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. as already flushing 2024-12-12T16:29:54,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:54,256 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
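[Aside, not part of the log output] At this point a second table flush (pid=171/172) has been requested while the region is still mid-flush, so the FlushRegionCallable fails with "Unable to complete flush ... as already flushing" and the master re-dispatches it, while client mutations keep receiving RegionTooBusyException until the in-flight flush drains the memstore. The HBase client normally retries these internally and may surface the failure wrapped in a retries-exhausted exception; the sketch below is only a hand-rolled retry, using the row/family/qualifier names that appear in this log, to make the caller-visible behavior concrete.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Minimal sketch, assuming a reachable cluster with the TestAcidGuarantees table:
// manually retry a put that is rejected while the region memstore is over its limit.
public class RetryOnRegionTooBusy {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);            // rejected with RegionTooBusyException while the region is blocked
          break;
        } catch (RegionTooBusyException e) {
          if (attempt >= 5) throw e; // give up after an arbitrary number of tries
          Thread.sleep(100L * attempt); // back off while the flush drains the memstore
        }
      }
    }
  }
}
```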
2024-12-12T16:29:54,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:54,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:54,303 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=292, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/26971657f3a2439aa2b6198169c470a8 2024-12-12T16:29:54,309 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/aa8f5633e2e642108f25bb1814c8210a is 50, key is test_row_0/B:col10/1734020992358/Put/seqid=0 2024-12-12T16:29:54,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742451_1627 (size=12301) 2024-12-12T16:29:54,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-12T16:29:54,408 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:54,409 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-12-12T16:29:54,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:54,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. as already flushing 2024-12-12T16:29:54,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:54,409 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:54,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:54,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:54,561 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:54,561 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-12-12T16:29:54,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:54,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. as already flushing 2024-12-12T16:29:54,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:54,562 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:54,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:54,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:54,627 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:54,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56886 deadline: 1734021054625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:54,628 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:54,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1734021054626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:54,628 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:54,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56884 deadline: 1734021054627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:54,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-12T16:29:54,713 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=292 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/aa8f5633e2e642108f25bb1814c8210a 2024-12-12T16:29:54,714 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:54,714 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-12-12T16:29:54,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:54,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. as already flushing 2024-12-12T16:29:54,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 
2024-12-12T16:29:54,714 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:54,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:54,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:54,721 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/c4f59680a7f843e980cda773ccda3699 is 50, key is test_row_0/C:col10/1734020992358/Put/seqid=0 2024-12-12T16:29:54,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742452_1628 (size=12301) 2024-12-12T16:29:54,867 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:54,867 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-12-12T16:29:54,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:54,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. as already flushing 2024-12-12T16:29:54,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 
2024-12-12T16:29:54,867 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:54,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:54,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:55,019 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:55,020 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-12-12T16:29:55,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:55,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. as already flushing 2024-12-12T16:29:55,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:55,020 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:55,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:55,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:55,125 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=292 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/c4f59680a7f843e980cda773ccda3699 2024-12-12T16:29:55,129 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/26971657f3a2439aa2b6198169c470a8 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/26971657f3a2439aa2b6198169c470a8 2024-12-12T16:29:55,132 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/26971657f3a2439aa2b6198169c470a8, entries=200, sequenceid=292, filesize=39.0 K 2024-12-12T16:29:55,133 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/aa8f5633e2e642108f25bb1814c8210a as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/aa8f5633e2e642108f25bb1814c8210a 2024-12-12T16:29:55,136 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/aa8f5633e2e642108f25bb1814c8210a, entries=150, 
sequenceid=292, filesize=12.0 K 2024-12-12T16:29:55,137 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/c4f59680a7f843e980cda773ccda3699 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/c4f59680a7f843e980cda773ccda3699 2024-12-12T16:29:55,140 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/c4f59680a7f843e980cda773ccda3699, entries=150, sequenceid=292, filesize=12.0 K 2024-12-12T16:29:55,140 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 8e89f008dc04dbad786e718ad49c4912 in 1657ms, sequenceid=292, compaction requested=true 2024-12-12T16:29:55,140 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8e89f008dc04dbad786e718ad49c4912: 2024-12-12T16:29:55,141 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8e89f008dc04dbad786e718ad49c4912:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T16:29:55,141 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:55,141 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8e89f008dc04dbad786e718ad49c4912:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T16:29:55,141 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:55,141 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T16:29:55,141 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8e89f008dc04dbad786e718ad49c4912:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T16:29:55,141 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T16:29:55,141 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:29:55,142 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 133960 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T16:29:55,142 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): 8e89f008dc04dbad786e718ad49c4912/A is initiating minor compaction (all files) 2024-12-12T16:29:55,142 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of 
size 49450 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T16:29:55,142 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): 8e89f008dc04dbad786e718ad49c4912/B is initiating minor compaction (all files) 2024-12-12T16:29:55,142 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8e89f008dc04dbad786e718ad49c4912/A in TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:55,142 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8e89f008dc04dbad786e718ad49c4912/B in TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:55,142 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/cae37e331f124bb686c022dabd68a526, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/cd06d18fbe2648cca6ac7fe960a70ea2, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/10bcae9a186246de8b14d1f027fa8789, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/aa8f5633e2e642108f25bb1814c8210a] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp, totalSize=48.3 K 2024-12-12T16:29:55,142 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/1e8a462c57b1439fbc9e5555c161d3e6, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/82242a442ca448c3ae161a3aadaeffb6, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/3c24002ff14d4cf6babe7fb6573798ca, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/26971657f3a2439aa2b6198169c470a8] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp, totalSize=130.8 K 2024-12-12T16:29:55,142 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:55,142 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 
files: [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/1e8a462c57b1439fbc9e5555c161d3e6, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/82242a442ca448c3ae161a3aadaeffb6, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/3c24002ff14d4cf6babe7fb6573798ca, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/26971657f3a2439aa2b6198169c470a8] 2024-12-12T16:29:55,142 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting cae37e331f124bb686c022dabd68a526, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1734020989973 2024-12-12T16:29:55,142 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1e8a462c57b1439fbc9e5555c161d3e6, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1734020989973 2024-12-12T16:29:55,143 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 82242a442ca448c3ae161a3aadaeffb6, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1734020990597 2024-12-12T16:29:55,143 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting cd06d18fbe2648cca6ac7fe960a70ea2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1734020990597 2024-12-12T16:29:55,143 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3c24002ff14d4cf6babe7fb6573798ca, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1734020991233 2024-12-12T16:29:55,143 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 10bcae9a186246de8b14d1f027fa8789, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1734020991233 2024-12-12T16:29:55,144 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 26971657f3a2439aa2b6198169c470a8, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1734020992358 2024-12-12T16:29:55,144 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting aa8f5633e2e642108f25bb1814c8210a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1734020992358 2024-12-12T16:29:55,150 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=8e89f008dc04dbad786e718ad49c4912] 2024-12-12T16:29:55,153 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8e89f008dc04dbad786e718ad49c4912#B#compaction#531 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:29:55,153 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/1c3c4d4ebc004bf0a20027fde904bccb is 50, key is test_row_0/B:col10/1734020992358/Put/seqid=0 2024-12-12T16:29:55,156 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241212e6042d74daf34867b7851d4bc82db111_8e89f008dc04dbad786e718ad49c4912 store=[table=TestAcidGuarantees family=A region=8e89f008dc04dbad786e718ad49c4912] 2024-12-12T16:29:55,159 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241212e6042d74daf34867b7851d4bc82db111_8e89f008dc04dbad786e718ad49c4912, store=[table=TestAcidGuarantees family=A region=8e89f008dc04dbad786e718ad49c4912] 2024-12-12T16:29:55,159 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212e6042d74daf34867b7851d4bc82db111_8e89f008dc04dbad786e718ad49c4912 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=8e89f008dc04dbad786e718ad49c4912] 2024-12-12T16:29:55,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742453_1629 (size=12983) 2024-12-12T16:29:55,172 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:55,173 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-12-12T16:29:55,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 
2024-12-12T16:29:55,173 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2837): Flushing 8e89f008dc04dbad786e718ad49c4912 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-12T16:29:55,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=A 2024-12-12T16:29:55,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:55,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=B 2024-12-12T16:29:55,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:55,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=C 2024-12-12T16:29:55,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:55,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742454_1630 (size=4469) 2024-12-12T16:29:55,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212899d48a263994db28e5cd58cc49a1fb5_8e89f008dc04dbad786e718ad49c4912 is 50, key is test_row_0/A:col10/1734020993509/Put/seqid=0 2024-12-12T16:29:55,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742455_1631 (size=12454) 2024-12-12T16:29:55,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:29:55,199 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212899d48a263994db28e5cd58cc49a1fb5_8e89f008dc04dbad786e718ad49c4912 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212899d48a263994db28e5cd58cc49a1fb5_8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:55,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/c517c652ff1b436e9f565e531333f918, store: [table=TestAcidGuarantees family=A region=8e89f008dc04dbad786e718ad49c4912] 2024-12-12T16:29:55,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/c517c652ff1b436e9f565e531333f918 is 175, key is test_row_0/A:col10/1734020993509/Put/seqid=0 2024-12-12T16:29:55,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742456_1632 (size=31255) 2024-12-12T16:29:55,204 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=315, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/c517c652ff1b436e9f565e531333f918 2024-12-12T16:29:55,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-12T16:29:55,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/5dd82d2fa8b54eb3b31028b843609824 is 50, key is test_row_0/B:col10/1734020993509/Put/seqid=0 2024-12-12T16:29:55,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742457_1633 (size=12301) 2024-12-12T16:29:55,569 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/1c3c4d4ebc004bf0a20027fde904bccb as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/1c3c4d4ebc004bf0a20027fde904bccb 2024-12-12T16:29:55,573 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 8e89f008dc04dbad786e718ad49c4912/B of 8e89f008dc04dbad786e718ad49c4912 into 1c3c4d4ebc004bf0a20027fde904bccb(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T16:29:55,573 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8e89f008dc04dbad786e718ad49c4912: 2024-12-12T16:29:55,573 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912., storeName=8e89f008dc04dbad786e718ad49c4912/B, priority=12, startTime=1734020995141; duration=0sec 2024-12-12T16:29:55,573 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:29:55,573 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e89f008dc04dbad786e718ad49c4912:B 2024-12-12T16:29:55,573 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T16:29:55,575 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49450 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T16:29:55,575 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): 8e89f008dc04dbad786e718ad49c4912/C is initiating minor compaction (all files) 2024-12-12T16:29:55,575 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8e89f008dc04dbad786e718ad49c4912/C in TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:55,575 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/863fdf10abca478788a7dfb9d91e3572, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/ee3593518ab24d149aae175f3a38e7c3, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/6450c807d5274138a897697ec5b6d47e, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/c4f59680a7f843e980cda773ccda3699] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp, totalSize=48.3 K 2024-12-12T16:29:55,575 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 863fdf10abca478788a7dfb9d91e3572, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1734020989973 2024-12-12T16:29:55,575 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting ee3593518ab24d149aae175f3a38e7c3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1734020990597 2024-12-12T16:29:55,576 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 6450c807d5274138a897697ec5b6d47e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=278, earliestPutTs=1734020991233 2024-12-12T16:29:55,576 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting c4f59680a7f843e980cda773ccda3699, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1734020992358 2024-12-12T16:29:55,582 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8e89f008dc04dbad786e718ad49c4912#C#compaction#534 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:29:55,583 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/5ae5b40db68a4b05ba4d7170f65158a0 is 50, key is test_row_0/C:col10/1734020992358/Put/seqid=0 2024-12-12T16:29:55,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742458_1634 (size=12983) 2024-12-12T16:29:55,587 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8e89f008dc04dbad786e718ad49c4912#A#compaction#530 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:29:55,587 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/0b40c62ef3c446c5b56170ec915f30d2 is 175, key is test_row_0/A:col10/1734020992358/Put/seqid=0 2024-12-12T16:29:55,590 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/5ae5b40db68a4b05ba4d7170f65158a0 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/5ae5b40db68a4b05ba4d7170f65158a0 2024-12-12T16:29:55,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742459_1635 (size=31937) 2024-12-12T16:29:55,599 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 8e89f008dc04dbad786e718ad49c4912/C of 8e89f008dc04dbad786e718ad49c4912 into 5ae5b40db68a4b05ba4d7170f65158a0(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T16:29:55,599 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8e89f008dc04dbad786e718ad49c4912: 2024-12-12T16:29:55,600 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912., storeName=8e89f008dc04dbad786e718ad49c4912/C, priority=12, startTime=1734020995141; duration=0sec 2024-12-12T16:29:55,600 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/0b40c62ef3c446c5b56170ec915f30d2 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/0b40c62ef3c446c5b56170ec915f30d2 2024-12-12T16:29:55,600 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:55,600 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e89f008dc04dbad786e718ad49c4912:C 2024-12-12T16:29:55,603 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 8e89f008dc04dbad786e718ad49c4912/A of 8e89f008dc04dbad786e718ad49c4912 into 0b40c62ef3c446c5b56170ec915f30d2(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:29:55,604 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8e89f008dc04dbad786e718ad49c4912: 2024-12-12T16:29:55,604 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912., storeName=8e89f008dc04dbad786e718ad49c4912/A, priority=12, startTime=1734020995140; duration=0sec 2024-12-12T16:29:55,604 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:55,604 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e89f008dc04dbad786e718ad49c4912:A 2024-12-12T16:29:55,614 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/5dd82d2fa8b54eb3b31028b843609824 2024-12-12T16:29:55,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/c12140afd7214212bf2ecbb50709bf69 is 50, key is test_row_0/C:col10/1734020993509/Put/seqid=0 2024-12-12T16:29:55,625 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742460_1636 (size=12301) 2024-12-12T16:29:55,626 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/c12140afd7214212bf2ecbb50709bf69 2024-12-12T16:29:55,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/c517c652ff1b436e9f565e531333f918 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/c517c652ff1b436e9f565e531333f918 2024-12-12T16:29:55,632 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. as already flushing 2024-12-12T16:29:55,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:55,635 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/c517c652ff1b436e9f565e531333f918, entries=150, sequenceid=315, filesize=30.5 K 2024-12-12T16:29:55,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/5dd82d2fa8b54eb3b31028b843609824 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/5dd82d2fa8b54eb3b31028b843609824 2024-12-12T16:29:55,642 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/5dd82d2fa8b54eb3b31028b843609824, entries=150, sequenceid=315, filesize=12.0 K 2024-12-12T16:29:55,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/c12140afd7214212bf2ecbb50709bf69 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/c12140afd7214212bf2ecbb50709bf69 2024-12-12T16:29:55,645 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:55,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56886 deadline: 1734021055642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:55,646 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/c12140afd7214212bf2ecbb50709bf69, entries=150, sequenceid=315, filesize=12.0 K 2024-12-12T16:29:55,647 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for 8e89f008dc04dbad786e718ad49c4912 in 474ms, sequenceid=315, compaction requested=false 2024-12-12T16:29:55,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2538): Flush status journal for 8e89f008dc04dbad786e718ad49c4912: 2024-12-12T16:29:55,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 
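
The WARN/DEBUG pair above records puts being rejected with RegionTooBusyException because the region's memstore has reached its blocking limit (512.0 K here; in a stock setup that threshold is hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier). The stock HBase client already retries these internally; a minimal hand-rolled retry sketch -- table, row, family and qualifier names are taken from the log, while the retry count, backoff and cell value are assumptions -- could look like:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Minimal sketch of retrying a put that the server rejects with RegionTooBusyException.
// The HBase client normally handles this retry itself; attempts and backoff here are assumptions.
public class BusyRegionRetrySketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"))
                    .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);
                    break; // write accepted
                } catch (RegionTooBusyException e) {
                    // Memstore is above its blocking limit; wait for the flush to drain it, then retry.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;
                }
            }
        }
    }
}
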
2024-12-12T16:29:55,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=172 2024-12-12T16:29:55,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4106): Remote procedure done, pid=172 2024-12-12T16:29:55,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:55,649 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8e89f008dc04dbad786e718ad49c4912 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-12T16:29:55,649 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=A 2024-12-12T16:29:55,649 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:55,649 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=B 2024-12-12T16:29:55,649 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:55,649 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=C 2024-12-12T16:29:55,649 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:55,650 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=172, resume processing ppid=171 2024-12-12T16:29:55,650 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=172, ppid=171, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5440 sec 2024-12-12T16:29:55,652 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=171, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees in 1.5490 sec 2024-12-12T16:29:55,656 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412126897636af43f4f49b5ae5ea5ba35ac65_8e89f008dc04dbad786e718ad49c4912 is 50, key is test_row_0/A:col10/1734020995644/Put/seqid=0 2024-12-12T16:29:55,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742461_1637 (size=12454) 2024-12-12T16:29:55,678 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:55,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56884 deadline: 1734021055676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:55,679 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:55,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1734021055677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:55,747 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:55,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56886 deadline: 1734021055746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:55,780 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:55,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56884 deadline: 1734021055779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:55,782 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:55,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1734021055780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:55,950 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:55,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56886 deadline: 1734021055949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:55,983 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:55,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56884 deadline: 1734021055982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:55,986 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:55,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1734021055984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:56,060 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:29:56,064 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412126897636af43f4f49b5ae5ea5ba35ac65_8e89f008dc04dbad786e718ad49c4912 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412126897636af43f4f49b5ae5ea5ba35ac65_8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:56,064 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/1309447e44ad4b7b954790665a7a9bb5, store: [table=TestAcidGuarantees family=A region=8e89f008dc04dbad786e718ad49c4912] 2024-12-12T16:29:56,065 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/1309447e44ad4b7b954790665a7a9bb5 is 175, key is test_row_0/A:col10/1734020995644/Put/seqid=0 2024-12-12T16:29:56,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742462_1638 (size=31255) 2024-12-12T16:29:56,069 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=334, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/1309447e44ad4b7b954790665a7a9bb5 2024-12-12T16:29:56,075 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/bb466c7a44a5465e8f3507b995d39ef7 is 50, key is test_row_0/B:col10/1734020995644/Put/seqid=0 2024-12-12T16:29:56,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742463_1639 (size=12301) 2024-12-12T16:29:56,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-12T16:29:56,207 INFO [Thread-2445 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 171 completed 2024-12-12T16:29:56,208 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T16:29:56,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=173, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees 2024-12-12T16:29:56,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-12T16:29:56,210 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=173, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T16:29:56,210 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=173, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T16:29:56,211 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=174, ppid=173, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T16:29:56,254 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:56,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56886 deadline: 1734021056253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:56,286 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:56,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56884 deadline: 1734021056284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:56,289 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:56,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1734021056287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:56,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-12T16:29:56,362 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:56,362 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-12-12T16:29:56,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:56,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. as already flushing 2024-12-12T16:29:56,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:56,362 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:56,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:56,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:56,479 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=334 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/bb466c7a44a5465e8f3507b995d39ef7 2024-12-12T16:29:56,485 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/624c6b36593048cb865d93d5cc9c9c75 is 50, key is test_row_0/C:col10/1734020995644/Put/seqid=0 2024-12-12T16:29:56,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742464_1640 (size=12301) 2024-12-12T16:29:56,501 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=334 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/624c6b36593048cb865d93d5cc9c9c75 2024-12-12T16:29:56,505 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/1309447e44ad4b7b954790665a7a9bb5 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/1309447e44ad4b7b954790665a7a9bb5 2024-12-12T16:29:56,508 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/1309447e44ad4b7b954790665a7a9bb5, entries=150, sequenceid=334, filesize=30.5 K 2024-12-12T16:29:56,509 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/bb466c7a44a5465e8f3507b995d39ef7 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/bb466c7a44a5465e8f3507b995d39ef7 2024-12-12T16:29:56,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-12T16:29:56,512 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/bb466c7a44a5465e8f3507b995d39ef7, entries=150, sequenceid=334, filesize=12.0 K 2024-12-12T16:29:56,512 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/624c6b36593048cb865d93d5cc9c9c75 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/624c6b36593048cb865d93d5cc9c9c75 2024-12-12T16:29:56,514 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:56,515 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-12-12T16:29:56,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:56,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. as already flushing 2024-12-12T16:29:56,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:56,516 ERROR [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:56,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T16:29:56,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
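
The procedure failure above (pid=174) is the region server refusing the master-requested flush because a flush of 8e89f008dc04dbad786e718ad49c4912 is already in progress ("NOT flushing ... as already flushing"); the error is reported back to the master, which re-dispatches the callable later in the log. From the client side the whole exchange starts with a single Admin call, as in the sketch below (configuration and table name as in the log; this is a sketch, not the test's own code):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Minimal sketch of the client call behind the FlushTableProcedure entries in the log;
// the HBaseAdmin$TableFuture lines show the call returning once the procedure completes.
public class FlushTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
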
2024-12-12T16:29:56,518 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/624c6b36593048cb865d93d5cc9c9c75, entries=150, sequenceid=334, filesize=12.0 K 2024-12-12T16:29:56,519 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 8e89f008dc04dbad786e718ad49c4912 in 869ms, sequenceid=334, compaction requested=true 2024-12-12T16:29:56,519 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8e89f008dc04dbad786e718ad49c4912: 2024-12-12T16:29:56,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8e89f008dc04dbad786e718ad49c4912:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T16:29:56,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:56,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8e89f008dc04dbad786e718ad49c4912:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T16:29:56,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:56,519 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:29:56,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8e89f008dc04dbad786e718ad49c4912:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T16:29:56,519 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:29:56,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:29:56,520 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:29:56,520 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): 8e89f008dc04dbad786e718ad49c4912/B is initiating minor compaction (all files) 2024-12-12T16:29:56,520 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8e89f008dc04dbad786e718ad49c4912/B in TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 
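
Here the MemStoreFlusher itself queues follow-up compactions for stores A, B and C once the flush completes. The same kind of request can also be made explicitly from a client; the sketch below -- an illustration, not part of the test -- asks for a compaction of the B family that the long-compactions thread is about to work on:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

// Minimal sketch of explicitly requesting a compaction of one column family.
// Admin.compact() only queues the request on the region server; it does not wait for completion.
public class RequestCompactionSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            admin.compact(TableName.valueOf("TestAcidGuarantees"), Bytes.toBytes("B"));
        }
    }
}
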
2024-12-12T16:29:56,520 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/1c3c4d4ebc004bf0a20027fde904bccb, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/5dd82d2fa8b54eb3b31028b843609824, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/bb466c7a44a5465e8f3507b995d39ef7] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp, totalSize=36.7 K 2024-12-12T16:29:56,520 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94447 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:29:56,520 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): 8e89f008dc04dbad786e718ad49c4912/A is initiating minor compaction (all files) 2024-12-12T16:29:56,520 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 1c3c4d4ebc004bf0a20027fde904bccb, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1734020992358 2024-12-12T16:29:56,520 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8e89f008dc04dbad786e718ad49c4912/A in TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:56,520 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/0b40c62ef3c446c5b56170ec915f30d2, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/c517c652ff1b436e9f565e531333f918, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/1309447e44ad4b7b954790665a7a9bb5] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp, totalSize=92.2 K 2024-12-12T16:29:56,520 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:56,520 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 
files: [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/0b40c62ef3c446c5b56170ec915f30d2, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/c517c652ff1b436e9f565e531333f918, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/1309447e44ad4b7b954790665a7a9bb5] 2024-12-12T16:29:56,520 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 5dd82d2fa8b54eb3b31028b843609824, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1734020993505 2024-12-12T16:29:56,521 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0b40c62ef3c446c5b56170ec915f30d2, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1734020992358 2024-12-12T16:29:56,521 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting bb466c7a44a5465e8f3507b995d39ef7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1734020995644 2024-12-12T16:29:56,521 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting c517c652ff1b436e9f565e531333f918, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1734020993505 2024-12-12T16:29:56,521 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1309447e44ad4b7b954790665a7a9bb5, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1734020995644 2024-12-12T16:29:56,527 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=8e89f008dc04dbad786e718ad49c4912] 2024-12-12T16:29:56,527 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8e89f008dc04dbad786e718ad49c4912#B#compaction#539 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:29:56,527 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/9f07e4552b304c14913a7533101e02fa is 50, key is test_row_0/B:col10/1734020995644/Put/seqid=0 2024-12-12T16:29:56,529 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241212b5f4dc11ad84409b915a60be45da7f10_8e89f008dc04dbad786e718ad49c4912 store=[table=TestAcidGuarantees family=A region=8e89f008dc04dbad786e718ad49c4912] 2024-12-12T16:29:56,530 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241212b5f4dc11ad84409b915a60be45da7f10_8e89f008dc04dbad786e718ad49c4912, store=[table=TestAcidGuarantees family=A region=8e89f008dc04dbad786e718ad49c4912] 2024-12-12T16:29:56,530 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212b5f4dc11ad84409b915a60be45da7f10_8e89f008dc04dbad786e718ad49c4912 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=8e89f008dc04dbad786e718ad49c4912] 2024-12-12T16:29:56,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742465_1641 (size=13085) 2024-12-12T16:29:56,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742466_1642 (size=4469) 2024-12-12T16:29:56,668 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:56,668 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41933 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-12-12T16:29:56,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 
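Annotation: pid=174 is a FlushRegionCallable dispatched by the master's FlushTableProcedure (pid=173, reported as completed further down). From the client side this corresponds to an explicit administrative flush; a hedged sketch, assuming the standard HBase 2.x client API:

// Hedged sketch: an explicit table flush from the client, which is what the
// FlushRegionCallable / FlushTableProcedure above are executing on the server.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // Asks the master to flush every region of the table; on the region
            // server this surfaces as RS_FLUSH_REGIONS events like the ones above.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}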
2024-12-12T16:29:56,669 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2837): Flushing 8e89f008dc04dbad786e718ad49c4912 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-12T16:29:56,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=A 2024-12-12T16:29:56,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:56,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=B 2024-12-12T16:29:56,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:56,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=C 2024-12-12T16:29:56,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:56,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212650ab28de9ab4da19b2c0b2b501f2bdd_8e89f008dc04dbad786e718ad49c4912 is 50, key is test_row_0/A:col10/1734020995675/Put/seqid=0 2024-12-12T16:29:56,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742467_1643 (size=12454) 2024-12-12T16:29:56,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:29:56,683 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212650ab28de9ab4da19b2c0b2b501f2bdd_8e89f008dc04dbad786e718ad49c4912 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212650ab28de9ab4da19b2c0b2b501f2bdd_8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:56,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/68e3777d7df04a00b9179937f5a9bbf3, store: [table=TestAcidGuarantees family=A region=8e89f008dc04dbad786e718ad49c4912] 2024-12-12T16:29:56,685 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/68e3777d7df04a00b9179937f5a9bbf3 is 175, key is test_row_0/A:col10/1734020995675/Put/seqid=0 2024-12-12T16:29:56,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742468_1644 (size=31255) 2024-12-12T16:29:56,688 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=354, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/68e3777d7df04a00b9179937f5a9bbf3 2024-12-12T16:29:56,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/83935cf491d64ce4afbba89756bd4295 is 50, key is test_row_0/B:col10/1734020995675/Put/seqid=0 2024-12-12T16:29:56,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742469_1645 (size=12301) 2024-12-12T16:29:56,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:56,758 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. as already flushing 2024-12-12T16:29:56,794 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:56,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56886 deadline: 1734021056791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:56,794 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:56,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56884 deadline: 1734021056792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:56,795 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:56,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1734021056792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:56,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-12T16:29:56,896 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:56,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56886 deadline: 1734021056895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:56,897 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:56,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56884 deadline: 1734021056895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:56,898 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:56,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1734021056895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:56,940 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/9f07e4552b304c14913a7533101e02fa as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/9f07e4552b304c14913a7533101e02fa 2024-12-12T16:29:56,940 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8e89f008dc04dbad786e718ad49c4912#A#compaction#540 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:29:56,941 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/f07a6a0a38a94b149934464dc5fa19aa is 175, key is test_row_0/A:col10/1734020995644/Put/seqid=0 2024-12-12T16:29:56,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742470_1646 (size=32039) 2024-12-12T16:29:56,946 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8e89f008dc04dbad786e718ad49c4912/B of 8e89f008dc04dbad786e718ad49c4912 into 9f07e4552b304c14913a7533101e02fa(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
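Annotation: the repeated RegionTooBusyException warnings above and below mean the region's memstore has reached its blocking limit (512 K in this test setup), so new Mutate calls are rejected until the in-flight flush drains it. The stock HBase client absorbs these with its own retry policy; the sketch below only makes the backoff idea explicit. The table name is the one from this log, while the retry bounds are invented for illustration.

// Hedged sketch: client-side handling of the RegionTooBusyException seen above.
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

public class BusyRegionRetrySketch {
    static void putWithBackoff(Connection conn, Put put) throws Exception {
        try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            long backoffMs = 100;
            for (int attempt = 0; attempt < 5; attempt++) {
                try {
                    table.put(put);
                    return;
                } catch (RegionTooBusyException e) {
                    // "Over memstore limit": wait for the flush to drain, then retry.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;
                }
            }
            throw new RuntimeException("region still too busy after retries");
        }
    }
}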
2024-12-12T16:29:56,946 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8e89f008dc04dbad786e718ad49c4912: 2024-12-12T16:29:56,946 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912., storeName=8e89f008dc04dbad786e718ad49c4912/B, priority=13, startTime=1734020996519; duration=0sec 2024-12-12T16:29:56,946 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:29:56,946 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e89f008dc04dbad786e718ad49c4912:B 2024-12-12T16:29:56,946 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:29:56,948 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:29:56,948 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): 8e89f008dc04dbad786e718ad49c4912/C is initiating minor compaction (all files) 2024-12-12T16:29:56,948 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8e89f008dc04dbad786e718ad49c4912/C in TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:56,948 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/5ae5b40db68a4b05ba4d7170f65158a0, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/c12140afd7214212bf2ecbb50709bf69, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/624c6b36593048cb865d93d5cc9c9c75] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp, totalSize=36.7 K 2024-12-12T16:29:56,948 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 5ae5b40db68a4b05ba4d7170f65158a0, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1734020992358 2024-12-12T16:29:56,948 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting c12140afd7214212bf2ecbb50709bf69, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1734020993505 2024-12-12T16:29:56,949 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 624c6b36593048cb865d93d5cc9c9c75, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1734020995644 2024-12-12T16:29:56,950 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/f07a6a0a38a94b149934464dc5fa19aa as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/f07a6a0a38a94b149934464dc5fa19aa 2024-12-12T16:29:56,956 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8e89f008dc04dbad786e718ad49c4912#C#compaction#543 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:29:56,956 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/2a3a01ffb54a4860ab643d260865703f is 50, key is test_row_0/C:col10/1734020995644/Put/seqid=0 2024-12-12T16:29:56,962 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8e89f008dc04dbad786e718ad49c4912/A of 8e89f008dc04dbad786e718ad49c4912 into f07a6a0a38a94b149934464dc5fa19aa(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:29:56,962 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8e89f008dc04dbad786e718ad49c4912: 2024-12-12T16:29:56,962 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912., storeName=8e89f008dc04dbad786e718ad49c4912/A, priority=13, startTime=1734020996519; duration=0sec 2024-12-12T16:29:56,962 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:56,962 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e89f008dc04dbad786e718ad49c4912:A 2024-12-12T16:29:56,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742471_1647 (size=13085) 2024-12-12T16:29:57,100 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:57,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56886 deadline: 1734021057098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:57,100 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:57,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56884 deadline: 1734021057098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:57,101 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:57,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1734021057100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:57,102 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=354 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/83935cf491d64ce4afbba89756bd4295 2024-12-12T16:29:57,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/2e3ba2dd73a6401e928d07fbaef0f777 is 50, key is test_row_0/C:col10/1734020995675/Put/seqid=0 2024-12-12T16:29:57,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742472_1648 (size=12301) 2024-12-12T16:29:57,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-12T16:29:57,371 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/2a3a01ffb54a4860ab643d260865703f as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/2a3a01ffb54a4860ab643d260865703f 2024-12-12T16:29:57,375 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8e89f008dc04dbad786e718ad49c4912/C of 8e89f008dc04dbad786e718ad49c4912 into 2a3a01ffb54a4860ab643d260865703f(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
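Annotation: with stores A, B and C each compacted back down to a single file, the workload keeps exercising the property this suite is named for, as I understand it: a row written across all three families in one mutation must never be read back partially applied. The check below is an illustrative sketch in that spirit, not the test's actual assertion; the family and qualifier names are taken from the log, and the equality criterion is my assumption about what the writers produce.

// Hedged sketch: row-level atomicity check across families A/B/C.
import java.util.Arrays;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RowAtomicityCheckSketch {
    static boolean rowIsConsistent(Connection conn, byte[] row) throws Exception {
        try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Result r = table.get(new Get(row));
            byte[] a = r.getValue(Bytes.toBytes("A"), Bytes.toBytes("col10"));
            byte[] b = r.getValue(Bytes.toBytes("B"), Bytes.toBytes("col10"));
            byte[] c = r.getValue(Bytes.toBytes("C"), Bytes.toBytes("col10"));
            // A reader must never observe a partially applied row mutation.
            return Arrays.equals(a, b) && Arrays.equals(b, c);
        }
    }
}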
2024-12-12T16:29:57,375 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8e89f008dc04dbad786e718ad49c4912: 2024-12-12T16:29:57,375 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912., storeName=8e89f008dc04dbad786e718ad49c4912/C, priority=13, startTime=1734020996519; duration=0sec 2024-12-12T16:29:57,375 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:57,375 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e89f008dc04dbad786e718ad49c4912:C 2024-12-12T16:29:57,403 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:57,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56886 deadline: 1734021057402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:57,404 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:57,404 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T16:29:57,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56826 deadline: 1734021057402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:57,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41933 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56884 deadline: 1734021057402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:29:57,512 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=354 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/2e3ba2dd73a6401e928d07fbaef0f777 2024-12-12T16:29:57,516 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/68e3777d7df04a00b9179937f5a9bbf3 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/68e3777d7df04a00b9179937f5a9bbf3 2024-12-12T16:29:57,520 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/68e3777d7df04a00b9179937f5a9bbf3, entries=150, sequenceid=354, filesize=30.5 K 2024-12-12T16:29:57,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/83935cf491d64ce4afbba89756bd4295 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/83935cf491d64ce4afbba89756bd4295 2024-12-12T16:29:57,523 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/83935cf491d64ce4afbba89756bd4295, entries=150, sequenceid=354, filesize=12.0 K 2024-12-12T16:29:57,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/2e3ba2dd73a6401e928d07fbaef0f777 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/2e3ba2dd73a6401e928d07fbaef0f777 2024-12-12T16:29:57,527 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/2e3ba2dd73a6401e928d07fbaef0f777, entries=150, sequenceid=354, filesize=12.0 K 2024-12-12T16:29:57,528 INFO [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 8e89f008dc04dbad786e718ad49c4912 in 859ms, sequenceid=354, compaction requested=false 2024-12-12T16:29:57,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2538): Flush status journal for 8e89f008dc04dbad786e718ad49c4912: 2024-12-12T16:29:57,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on 
TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:57,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/4f6a4780a2f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=174 2024-12-12T16:29:57,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster(4106): Remote procedure done, pid=174 2024-12-12T16:29:57,530 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=174, resume processing ppid=173 2024-12-12T16:29:57,530 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=174, ppid=173, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3190 sec 2024-12-12T16:29:57,531 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=173, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees in 1.3220 sec 2024-12-12T16:29:57,588 DEBUG [Thread-2448 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x458a85fd to 127.0.0.1:52684 2024-12-12T16:29:57,588 DEBUG [Thread-2448 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:29:57,588 DEBUG [Thread-2452 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x67adb273 to 127.0.0.1:52684 2024-12-12T16:29:57,588 DEBUG [Thread-2452 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:29:57,589 DEBUG [Thread-2446 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x06556601 to 127.0.0.1:52684 2024-12-12T16:29:57,589 DEBUG [Thread-2446 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:29:57,589 DEBUG [Thread-2454 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x474dec36 to 127.0.0.1:52684 2024-12-12T16:29:57,589 DEBUG [Thread-2454 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:29:57,590 DEBUG [Thread-2450 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x410bf0c8 to 127.0.0.1:52684 2024-12-12T16:29:57,590 DEBUG [Thread-2450 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:29:57,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41933 {}] regionserver.HRegion(8581): Flush requested on 8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:57,906 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8e89f008dc04dbad786e718ad49c4912 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-12T16:29:57,907 DEBUG [Thread-2437 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5b914bf4 to 127.0.0.1:52684 2024-12-12T16:29:57,907 DEBUG [Thread-2441 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x150e08ed to 127.0.0.1:52684 2024-12-12T16:29:57,907 DEBUG [Thread-2439 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3f6a59e4 to 127.0.0.1:52684 2024-12-12T16:29:57,907 DEBUG [Thread-2441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:29:57,907 DEBUG [Thread-2437 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:29:57,907 DEBUG [Thread-2439 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:29:57,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=A 2024-12-12T16:29:57,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:57,907 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=B 2024-12-12T16:29:57,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:57,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=C 2024-12-12T16:29:57,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:29:57,912 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212bc4fdc6ffa2744b7acc0c9b73180c30a_8e89f008dc04dbad786e718ad49c4912 is 50, key is test_row_0/A:col10/1734020997905/Put/seqid=0 2024-12-12T16:29:57,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742473_1649 (size=12454) 2024-12-12T16:29:58,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-12T16:29:58,314 INFO [Thread-2445 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 173 completed 2024-12-12T16:29:58,315 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:29:58,318 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212bc4fdc6ffa2744b7acc0c9b73180c30a_8e89f008dc04dbad786e718ad49c4912 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212bc4fdc6ffa2744b7acc0c9b73180c30a_8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:29:58,319 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/3150f3d66c05499490c7f73bf60818ba, store: [table=TestAcidGuarantees family=A region=8e89f008dc04dbad786e718ad49c4912] 2024-12-12T16:29:58,319 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/3150f3d66c05499490c7f73bf60818ba is 175, key is test_row_0/A:col10/1734020997905/Put/seqid=0 2024-12-12T16:29:58,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742474_1650 (size=31255) 2024-12-12T16:29:58,723 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=376, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/3150f3d66c05499490c7f73bf60818ba 2024-12-12T16:29:58,728 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/0062b34c475d47ab8c832e1090ac846b is 50, key is test_row_0/B:col10/1734020997905/Put/seqid=0 2024-12-12T16:29:58,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742475_1651 (size=12301) 2024-12-12T16:29:59,132 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=376 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/0062b34c475d47ab8c832e1090ac846b 2024-12-12T16:29:59,137 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/b11ac20c26134524a43d4b0e4c9bf65e is 50, key is test_row_0/C:col10/1734020997905/Put/seqid=0 2024-12-12T16:29:59,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742476_1652 (size=12301) 2024-12-12T16:29:59,541 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=376 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/b11ac20c26134524a43d4b0e4c9bf65e 2024-12-12T16:29:59,544 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/3150f3d66c05499490c7f73bf60818ba as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/3150f3d66c05499490c7f73bf60818ba 2024-12-12T16:29:59,546 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/3150f3d66c05499490c7f73bf60818ba, entries=150, sequenceid=376, filesize=30.5 K 2024-12-12T16:29:59,547 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/0062b34c475d47ab8c832e1090ac846b as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/0062b34c475d47ab8c832e1090ac846b 2024-12-12T16:29:59,549 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/0062b34c475d47ab8c832e1090ac846b, entries=150, sequenceid=376, filesize=12.0 K 2024-12-12T16:29:59,549 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/b11ac20c26134524a43d4b0e4c9bf65e as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/b11ac20c26134524a43d4b0e4c9bf65e 2024-12-12T16:29:59,552 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/b11ac20c26134524a43d4b0e4c9bf65e, entries=150, sequenceid=376, filesize=12.0 K 2024-12-12T16:29:59,552 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=0 B/0 for 8e89f008dc04dbad786e718ad49c4912 in 1646ms, sequenceid=376, compaction requested=true 2024-12-12T16:29:59,552 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8e89f008dc04dbad786e718ad49c4912: 2024-12-12T16:29:59,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8e89f008dc04dbad786e718ad49c4912:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T16:29:59,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:59,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8e89f008dc04dbad786e718ad49c4912:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T16:29:59,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:29:59,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8e89f008dc04dbad786e718ad49c4912:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T16:29:59,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:29:59,553 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:29:59,553 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:29:59,553 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:29:59,553 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94549 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:29:59,553 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): 8e89f008dc04dbad786e718ad49c4912/B is initiating minor compaction (all files) 2024-12-12T16:29:59,553 DEBUG 
[RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1540): 8e89f008dc04dbad786e718ad49c4912/A is initiating minor compaction (all files) 2024-12-12T16:29:59,553 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8e89f008dc04dbad786e718ad49c4912/B in TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:59,553 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8e89f008dc04dbad786e718ad49c4912/A in TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:59,553 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/9f07e4552b304c14913a7533101e02fa, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/83935cf491d64ce4afbba89756bd4295, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/0062b34c475d47ab8c832e1090ac846b] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp, totalSize=36.8 K 2024-12-12T16:29:59,553 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/f07a6a0a38a94b149934464dc5fa19aa, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/68e3777d7df04a00b9179937f5a9bbf3, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/3150f3d66c05499490c7f73bf60818ba] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp, totalSize=92.3 K 2024-12-12T16:29:59,554 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:59,554 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 
files: [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/f07a6a0a38a94b149934464dc5fa19aa, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/68e3777d7df04a00b9179937f5a9bbf3, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/3150f3d66c05499490c7f73bf60818ba] 2024-12-12T16:29:59,554 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 9f07e4552b304c14913a7533101e02fa, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1734020995644 2024-12-12T16:29:59,554 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting f07a6a0a38a94b149934464dc5fa19aa, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1734020995644 2024-12-12T16:29:59,554 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 83935cf491d64ce4afbba89756bd4295, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=354, earliestPutTs=1734020995665 2024-12-12T16:29:59,554 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 68e3777d7df04a00b9179937f5a9bbf3, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=354, earliestPutTs=1734020995665 2024-12-12T16:29:59,554 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 0062b34c475d47ab8c832e1090ac846b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=376, earliestPutTs=1734020997905 2024-12-12T16:29:59,554 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3150f3d66c05499490c7f73bf60818ba, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=376, earliestPutTs=1734020997905 2024-12-12T16:29:59,558 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=8e89f008dc04dbad786e718ad49c4912] 2024-12-12T16:29:59,560 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412121cf8923059284ca4aee42481251f5e1d_8e89f008dc04dbad786e718ad49c4912 store=[table=TestAcidGuarantees family=A region=8e89f008dc04dbad786e718ad49c4912] 2024-12-12T16:29:59,560 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8e89f008dc04dbad786e718ad49c4912#B#compaction#549 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:29:59,561 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/49890075daa34c82b861ece793c98ba7 is 50, key is test_row_0/B:col10/1734020997905/Put/seqid=0 2024-12-12T16:29:59,563 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412121cf8923059284ca4aee42481251f5e1d_8e89f008dc04dbad786e718ad49c4912, store=[table=TestAcidGuarantees family=A region=8e89f008dc04dbad786e718ad49c4912] 2024-12-12T16:29:59,563 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412121cf8923059284ca4aee42481251f5e1d_8e89f008dc04dbad786e718ad49c4912 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=8e89f008dc04dbad786e718ad49c4912] 2024-12-12T16:29:59,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742477_1653 (size=13187) 2024-12-12T16:29:59,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742478_1654 (size=4469) 2024-12-12T16:29:59,967 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8e89f008dc04dbad786e718ad49c4912#A#compaction#548 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:29:59,967 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/6463a61a1e034508b20c264af793c5f1 is 175, key is test_row_0/A:col10/1734020997905/Put/seqid=0 2024-12-12T16:29:59,969 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/49890075daa34c82b861ece793c98ba7 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/49890075daa34c82b861ece793c98ba7 2024-12-12T16:29:59,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742479_1655 (size=32141) 2024-12-12T16:29:59,972 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8e89f008dc04dbad786e718ad49c4912/B of 8e89f008dc04dbad786e718ad49c4912 into 49890075daa34c82b861ece793c98ba7(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
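The entries above track a client-requested flush of default:TestAcidGuarantees (procId 173) finishing, after which the region server queues minor compactions for stores A, B and C. For orientation only, a flush like that one is issued through the stock HBase Admin API; the sketch below is a minimal, hedged reproduction of that client call. The class name FlushTestTable is hypothetical, and everything except the ZooKeeper endpoint 127.0.0.1:52684 and the table name (both taken from this log) is assumed.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTestTable {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();               // normally picks up hbase-site.xml
            conf.set("hbase.zookeeper.quorum", "127.0.0.1");                 // endpoint as seen in this log; adjust for a real cluster
            conf.set("hbase.zookeeper.property.clientPort", "52684");
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                TableName table = TableName.valueOf("default:TestAcidGuarantees");
                // Request a flush of all column families; the master tracks it as a
                // procedure, which is what shows up above as procId=173.
                admin.flush(table);
            }
        }
    }

The memstore sizes, sequence ids and the follow-up "Small Compaction requested" lines are then entirely server-side work triggered by that single call.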
2024-12-12T16:29:59,972 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8e89f008dc04dbad786e718ad49c4912: 2024-12-12T16:29:59,973 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912., storeName=8e89f008dc04dbad786e718ad49c4912/B, priority=13, startTime=1734020999552; duration=0sec 2024-12-12T16:29:59,973 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T16:29:59,973 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e89f008dc04dbad786e718ad49c4912:B 2024-12-12T16:29:59,973 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T16:29:59,973 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T16:29:59,973 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1540): 8e89f008dc04dbad786e718ad49c4912/C is initiating minor compaction (all files) 2024-12-12T16:29:59,973 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8e89f008dc04dbad786e718ad49c4912/C in TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:29:59,974 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/2a3a01ffb54a4860ab643d260865703f, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/2e3ba2dd73a6401e928d07fbaef0f777, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/b11ac20c26134524a43d4b0e4c9bf65e] into tmpdir=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp, totalSize=36.8 K 2024-12-12T16:29:59,974 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 2a3a01ffb54a4860ab643d260865703f, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1734020995644 2024-12-12T16:29:59,974 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting 2e3ba2dd73a6401e928d07fbaef0f777, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=354, earliestPutTs=1734020995665 2024-12-12T16:29:59,974 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] compactions.Compactor(224): Compacting b11ac20c26134524a43d4b0e4c9bf65e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=376, earliestPutTs=1734020997905 2024-12-12T16:29:59,979 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
8e89f008dc04dbad786e718ad49c4912#C#compaction#550 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T16:29:59,979 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/f3bcefd76f0244ad9dd47800a45615fa is 50, key is test_row_0/C:col10/1734020997905/Put/seqid=0 2024-12-12T16:29:59,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742480_1656 (size=13187) 2024-12-12T16:30:00,374 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/6463a61a1e034508b20c264af793c5f1 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/6463a61a1e034508b20c264af793c5f1 2024-12-12T16:30:00,378 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8e89f008dc04dbad786e718ad49c4912/A of 8e89f008dc04dbad786e718ad49c4912 into 6463a61a1e034508b20c264af793c5f1(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T16:30:00,378 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8e89f008dc04dbad786e718ad49c4912: 2024-12-12T16:30:00,378 INFO [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912., storeName=8e89f008dc04dbad786e718ad49c4912/A, priority=13, startTime=1734020999552; duration=0sec 2024-12-12T16:30:00,378 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:30:00,378 DEBUG [RS:0;4f6a4780a2f6:41933-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e89f008dc04dbad786e718ad49c4912:A 2024-12-12T16:30:00,385 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/f3bcefd76f0244ad9dd47800a45615fa as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/f3bcefd76f0244ad9dd47800a45615fa 2024-12-12T16:30:00,389 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8e89f008dc04dbad786e718ad49c4912/C of 8e89f008dc04dbad786e718ad49c4912 into f3bcefd76f0244ad9dd47800a45615fa(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
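Each "Exploring compaction algorithm has selected 3 files ... with 1 in ratio" line above means all eligible files in the candidate set passed the policy's size-ratio test, so the whole store is rewritten into one file. Purely as an illustration of that test, here is a simplified stand-in (not the HBase implementation); the 1.2 value mirrors the default hbase.hstore.compaction.ratio, and the byte sizes in main are the store-B candidate from this log.

    public class FilesInRatioSketch {
        /** Simplified stand-in for the size-ratio check applied to a compaction candidate set. */
        static boolean filesInRatio(long[] fileSizes, double ratio) {
            long total = 0;
            for (long s : fileSizes) total += s;
            for (long s : fileSizes) {
                // Every file must be at most ratio * (combined size of the other files),
                // otherwise the candidate set is rejected.
                if (s > ratio * (total - s)) return false;
            }
            return true;
        }

        public static void main(String[] args) {
            // Store-B candidate from the log: 12.8 K + 12.0 K + 12.0 K = 37687 bytes total.
            long[] storeB = {13085, 12301, 12301};
            System.out.println(filesInRatio(storeB, 1.2));  // true -> all three files are compacted together
        }
    }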
2024-12-12T16:30:00,389 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8e89f008dc04dbad786e718ad49c4912: 2024-12-12T16:30:00,389 INFO [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912., storeName=8e89f008dc04dbad786e718ad49c4912/C, priority=13, startTime=1734020999552; duration=0sec 2024-12-12T16:30:00,389 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T16:30:00,389 DEBUG [RS:0;4f6a4780a2f6:41933-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e89f008dc04dbad786e718ad49c4912:C 2024-12-12T16:30:01,042 DEBUG [Thread-2435 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1584f18a to 127.0.0.1:52684 2024-12-12T16:30:01,042 DEBUG [Thread-2435 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:30:01,055 DEBUG [Thread-2443 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3a3b66d3 to 127.0.0.1:52684 2024-12-12T16:30:01,055 DEBUG [Thread-2443 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:30:01,056 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-12-12T16:30:01,056 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 31 2024-12-12T16:30:01,056 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 88 2024-12-12T16:30:01,056 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 74 2024-12-12T16:30:01,056 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 75 2024-12-12T16:30:01,056 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 28 2024-12-12T16:30:01,056 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-12T16:30:01,056 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6584 2024-12-12T16:30:01,056 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6553 2024-12-12T16:30:01,056 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6502 2024-12-12T16:30:01,056 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6563 2024-12-12T16:30:01,056 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6556 2024-12-12T16:30:01,056 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-12T16:30:01,056 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-12T16:30:01,056 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x17327621 to 127.0.0.1:52684 2024-12-12T16:30:01,056 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:30:01,057 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-12T16:30:01,057 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-12T16:30:01,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=175, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-12T16:30:01,059 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-12T16:30:01,059 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734021001059"}]},"ts":"1734021001059"} 2024-12-12T16:30:01,060 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-12T16:30:01,062 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-12T16:30:01,062 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=176, ppid=175, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-12T16:30:01,063 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=177, ppid=176, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=8e89f008dc04dbad786e718ad49c4912, UNASSIGN}] 2024-12-12T16:30:01,064 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=177, ppid=176, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=8e89f008dc04dbad786e718ad49c4912, UNASSIGN 2024-12-12T16:30:01,064 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=177 updating hbase:meta row=8e89f008dc04dbad786e718ad49c4912, regionState=CLOSING, regionLocation=4f6a4780a2f6,41933,1734020809476 2024-12-12T16:30:01,065 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T16:30:01,065 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=178, ppid=177, state=RUNNABLE; CloseRegionProcedure 8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476}] 2024-12-12T16:30:01,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-12T16:30:01,216 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:30:01,216 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] handler.UnassignRegionHandler(124): Close 8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:30:01,216 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T16:30:01,216 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1681): Closing 8e89f008dc04dbad786e718ad49c4912, disabling compactions & flushes 2024-12-12T16:30:01,217 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:30:01,217 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 
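From 16:30:01 the client is in teardown: disabling TestAcidGuarantees (pid=175) fans out into CloseTableRegionsProcedure, TransitRegionStateProcedure and CloseRegionProcedure, and the region server begins closing 8e89f008dc04dbad786e718ad49c4912, flushing its last ~13 KB first. The client side of that is just the standard Admin calls; below is a minimal hedged sketch. The class name DisableTestTable is hypothetical, the connection setup is assumed to be the same as in the flush sketch earlier, and the delete step is a typical follow-up that is not shown in this excerpt.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableTestTable {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();   // connection settings assumed as in the flush sketch
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                TableName table = TableName.valueOf("default:TestAcidGuarantees");
                if (admin.isTableEnabled(table)) {
                    admin.disableTable(table);   // the master runs this as DisableTableProcedure (pid=175 above)
                }
                // admin.deleteTable(table);     // usual next step in test teardown, not part of this log excerpt
            }
        }
    }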
2024-12-12T16:30:01,217 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. after waiting 0 ms 2024-12-12T16:30:01,217 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:30:01,217 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(2837): Flushing 8e89f008dc04dbad786e718ad49c4912 3/3 column families, dataSize=13.42 KB heapSize=35.91 KB 2024-12-12T16:30:01,217 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=A 2024-12-12T16:30:01,217 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:30:01,217 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=B 2024-12-12T16:30:01,217 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:30:01,217 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8e89f008dc04dbad786e718ad49c4912, store=C 2024-12-12T16:30:01,217 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T16:30:01,221 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121282437295296645b5872bee69075efbc2_8e89f008dc04dbad786e718ad49c4912 is 50, key is test_row_0/A:col10/1734021001041/Put/seqid=0 2024-12-12T16:30:01,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742481_1657 (size=9914) 2024-12-12T16:30:01,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-12T16:30:01,625 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T16:30:01,627 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121282437295296645b5872bee69075efbc2_8e89f008dc04dbad786e718ad49c4912 to 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121282437295296645b5872bee69075efbc2_8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:30:01,628 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/9812e6574b004a16a82e711a8b9baa6b, store: [table=TestAcidGuarantees family=A region=8e89f008dc04dbad786e718ad49c4912] 2024-12-12T16:30:01,628 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/9812e6574b004a16a82e711a8b9baa6b is 175, key is test_row_0/A:col10/1734021001041/Put/seqid=0 2024-12-12T16:30:01,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742482_1658 (size=22561) 2024-12-12T16:30:01,631 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=384, memsize=4.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/9812e6574b004a16a82e711a8b9baa6b 2024-12-12T16:30:01,636 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/7f496cec9a194b1abb97013e29736934 is 50, key is test_row_0/B:col10/1734021001041/Put/seqid=0 2024-12-12T16:30:01,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742483_1659 (size=9857) 2024-12-12T16:30:01,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-12T16:30:02,040 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.47 KB at sequenceid=384 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/7f496cec9a194b1abb97013e29736934 2024-12-12T16:30:02,044 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/f10bfef911fb466fa34e4944ffb7531c is 50, key is test_row_0/C:col10/1734021001041/Put/seqid=0 2024-12-12T16:30:02,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742484_1660 (size=9857) 2024-12-12T16:30:02,161 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-12T16:30:02,448 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.47 KB at sequenceid=384 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/f10bfef911fb466fa34e4944ffb7531c 2024-12-12T16:30:02,451 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/A/9812e6574b004a16a82e711a8b9baa6b as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/9812e6574b004a16a82e711a8b9baa6b 2024-12-12T16:30:02,454 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/9812e6574b004a16a82e711a8b9baa6b, entries=100, sequenceid=384, filesize=22.0 K 2024-12-12T16:30:02,455 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/B/7f496cec9a194b1abb97013e29736934 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/7f496cec9a194b1abb97013e29736934 2024-12-12T16:30:02,457 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/7f496cec9a194b1abb97013e29736934, entries=100, sequenceid=384, filesize=9.6 K 2024-12-12T16:30:02,458 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/.tmp/C/f10bfef911fb466fa34e4944ffb7531c as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/f10bfef911fb466fa34e4944ffb7531c 2024-12-12T16:30:02,461 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/f10bfef911fb466fa34e4944ffb7531c, entries=100, sequenceid=384, filesize=9.6 K 2024-12-12T16:30:02,462 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(3040): Finished flush of dataSize ~13.42 KB/13740, 
heapSize ~35.86 KB/36720, currentSize=0 B/0 for 8e89f008dc04dbad786e718ad49c4912 in 1245ms, sequenceid=384, compaction requested=false 2024-12-12T16:30:02,462 DEBUG [StoreCloser-TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/7b4f3d3543974dc890b6ad04694ed863, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/b5695cd415bc453496f96202b2b8aea4, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/7537cb8ddc66407181f1c7eca83d7f25, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/924ebe969c9841dc8a83f03efb333dca, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/1cda643a71cb447097904fd5ccc4464c, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/1f5cdee28c734e66bc9f5a102ca7f416, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/4b649d74af354b578e17ff30779f8a46, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/b886420b8fac4b2ba98fdcb648622107, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/74d9010803c143dbbc1ac4d527ed84c5, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/e8d15dd370774f928e74d455fb163584, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/c3b85297d86e44a688e051cb92750084, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/5d5cfe87f1ea499d9bfdd84a63f5e53c, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/e8c758571db24b90bd544516b5f64bb8, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/6826f425852a4c63889c84009165580d, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/ae94fbcf9b2847ed898777114040c9e0, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/0101e4f6bec44d5383449c83948809a2, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/1e8a462c57b1439fbc9e5555c161d3e6, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/82242a442ca448c3ae161a3aadaeffb6, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/3c24002ff14d4cf6babe7fb6573798ca, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/26971657f3a2439aa2b6198169c470a8, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/0b40c62ef3c446c5b56170ec915f30d2, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/c517c652ff1b436e9f565e531333f918, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/f07a6a0a38a94b149934464dc5fa19aa, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/1309447e44ad4b7b954790665a7a9bb5, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/68e3777d7df04a00b9179937f5a9bbf3, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/3150f3d66c05499490c7f73bf60818ba] to archive 2024-12-12T16:30:02,463 DEBUG [StoreCloser-TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
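On region close, every compacted-away store file is handed to HFileArchiver, which moves it from the region's family directory into the parallel archive/ tree; the per-file moves are logged below. If you need to confirm what landed in the archive, a plain HDFS listing is enough. A small hedged sketch follows: the class name ListArchivedStoreFiles is hypothetical, while the NameNode address localhost:45065 and the archive path layout are copied from this log.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListArchivedStoreFiles {
        public static void main(String[] args) throws Exception {
            FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:45065"), new Configuration());
            // Archive mirror of the region's A-family directory, as written by HFileArchiver above.
            Path archivedA = new Path("/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/"
                + "archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A");
            for (FileStatus st : fs.listStatus(archivedA)) {
                System.out.printf("%s\t%d bytes%n", st.getPath().getName(), st.getLen());
            }
        }
    }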
2024-12-12T16:30:02,467 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/1cda643a71cb447097904fd5ccc4464c to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/1cda643a71cb447097904fd5ccc4464c 2024-12-12T16:30:02,467 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/b886420b8fac4b2ba98fdcb648622107 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/b886420b8fac4b2ba98fdcb648622107 2024-12-12T16:30:02,467 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/1f5cdee28c734e66bc9f5a102ca7f416 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/1f5cdee28c734e66bc9f5a102ca7f416 2024-12-12T16:30:02,468 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/7b4f3d3543974dc890b6ad04694ed863 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/7b4f3d3543974dc890b6ad04694ed863 2024-12-12T16:30:02,468 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/4b649d74af354b578e17ff30779f8a46 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/4b649d74af354b578e17ff30779f8a46 2024-12-12T16:30:02,468 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/7537cb8ddc66407181f1c7eca83d7f25 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/7537cb8ddc66407181f1c7eca83d7f25 2024-12-12T16:30:02,468 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/b5695cd415bc453496f96202b2b8aea4 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/b5695cd415bc453496f96202b2b8aea4 2024-12-12T16:30:02,469 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/924ebe969c9841dc8a83f03efb333dca to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/924ebe969c9841dc8a83f03efb333dca 2024-12-12T16:30:02,469 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/74d9010803c143dbbc1ac4d527ed84c5 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/74d9010803c143dbbc1ac4d527ed84c5 2024-12-12T16:30:02,469 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/e8d15dd370774f928e74d455fb163584 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/e8d15dd370774f928e74d455fb163584 2024-12-12T16:30:02,470 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/e8c758571db24b90bd544516b5f64bb8 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/e8c758571db24b90bd544516b5f64bb8 2024-12-12T16:30:02,470 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/c3b85297d86e44a688e051cb92750084 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/c3b85297d86e44a688e051cb92750084 2024-12-12T16:30:02,470 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/5d5cfe87f1ea499d9bfdd84a63f5e53c to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/5d5cfe87f1ea499d9bfdd84a63f5e53c 2024-12-12T16:30:02,471 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/6826f425852a4c63889c84009165580d to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/6826f425852a4c63889c84009165580d 2024-12-12T16:30:02,471 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/ae94fbcf9b2847ed898777114040c9e0 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/ae94fbcf9b2847ed898777114040c9e0 2024-12-12T16:30:02,471 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/0101e4f6bec44d5383449c83948809a2 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/0101e4f6bec44d5383449c83948809a2 2024-12-12T16:30:02,472 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/26971657f3a2439aa2b6198169c470a8 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/26971657f3a2439aa2b6198169c470a8 2024-12-12T16:30:02,472 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/82242a442ca448c3ae161a3aadaeffb6 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/82242a442ca448c3ae161a3aadaeffb6 2024-12-12T16:30:02,472 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/1e8a462c57b1439fbc9e5555c161d3e6 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/1e8a462c57b1439fbc9e5555c161d3e6 2024-12-12T16:30:02,472 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/3c24002ff14d4cf6babe7fb6573798ca to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/3c24002ff14d4cf6babe7fb6573798ca 2024-12-12T16:30:02,472 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/0b40c62ef3c446c5b56170ec915f30d2 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/0b40c62ef3c446c5b56170ec915f30d2 2024-12-12T16:30:02,473 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/f07a6a0a38a94b149934464dc5fa19aa to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/f07a6a0a38a94b149934464dc5fa19aa 2024-12-12T16:30:02,473 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/c517c652ff1b436e9f565e531333f918 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/c517c652ff1b436e9f565e531333f918 2024-12-12T16:30:02,473 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/1309447e44ad4b7b954790665a7a9bb5 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/1309447e44ad4b7b954790665a7a9bb5 2024-12-12T16:30:02,474 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/68e3777d7df04a00b9179937f5a9bbf3 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/68e3777d7df04a00b9179937f5a9bbf3 2024-12-12T16:30:02,474 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/3150f3d66c05499490c7f73bf60818ba to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/3150f3d66c05499490c7f73bf60818ba 2024-12-12T16:30:02,475 DEBUG [StoreCloser-TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/eb87795c7e204c6eb70a48178eb3ccf6, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/bcf68fa73d154b309e3ea567efc3aa44, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/0f93facd007c4284b26a4eedff2829cc, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/70ff85e9604e40ea90a0f90722861dc7, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/5a8615a7661e4470bea02cc74a202bcf, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/263113da39004d05a5d66232eeb271a4, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/5ba1c59dffdc447f97d3b82b0ef68086, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/e8bd9ba7fc784588aa18fb6bf78ac138, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/5b8a7328e7b8467fa5fa00f1dd6b92f2, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/a65c5ed986774393b42df9f7056c7914, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/35ed9744b44a4213bad32761bf468551, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/c7c026f0f39d4902af2107f82b7de86f, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/5c8a2cf94be5477a8abc66d9db0eca8c, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/df908cbd137d45f2914985cb2f10006a, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/16c8847be2a6448e90c9dccc4425b299, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/cae37e331f124bb686c022dabd68a526, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/6c0b529732284234bc8883b84084db6b, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/cd06d18fbe2648cca6ac7fe960a70ea2, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/10bcae9a186246de8b14d1f027fa8789, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/1c3c4d4ebc004bf0a20027fde904bccb, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/aa8f5633e2e642108f25bb1814c8210a, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/5dd82d2fa8b54eb3b31028b843609824, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/9f07e4552b304c14913a7533101e02fa, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/bb466c7a44a5465e8f3507b995d39ef7, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/83935cf491d64ce4afbba89756bd4295, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/0062b34c475d47ab8c832e1090ac846b] to archive 2024-12-12T16:30:02,476 DEBUG [StoreCloser-TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-12T16:30:02,478 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/0f93facd007c4284b26a4eedff2829cc to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/0f93facd007c4284b26a4eedff2829cc 2024-12-12T16:30:02,478 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/eb87795c7e204c6eb70a48178eb3ccf6 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/eb87795c7e204c6eb70a48178eb3ccf6 2024-12-12T16:30:02,479 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/263113da39004d05a5d66232eeb271a4 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/263113da39004d05a5d66232eeb271a4 2024-12-12T16:30:02,479 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/bcf68fa73d154b309e3ea567efc3aa44 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/bcf68fa73d154b309e3ea567efc3aa44 2024-12-12T16:30:02,480 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/e8bd9ba7fc784588aa18fb6bf78ac138 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/e8bd9ba7fc784588aa18fb6bf78ac138 2024-12-12T16:30:02,480 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/70ff85e9604e40ea90a0f90722861dc7 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/70ff85e9604e40ea90a0f90722861dc7 2024-12-12T16:30:02,480 DEBUG [HFileArchiver-13 {}] 
backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/5a8615a7661e4470bea02cc74a202bcf to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/5a8615a7661e4470bea02cc74a202bcf 2024-12-12T16:30:02,480 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/5ba1c59dffdc447f97d3b82b0ef68086 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/5ba1c59dffdc447f97d3b82b0ef68086 2024-12-12T16:30:02,482 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/a65c5ed986774393b42df9f7056c7914 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/a65c5ed986774393b42df9f7056c7914 2024-12-12T16:30:02,482 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/5b8a7328e7b8467fa5fa00f1dd6b92f2 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/5b8a7328e7b8467fa5fa00f1dd6b92f2 2024-12-12T16:30:02,482 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/35ed9744b44a4213bad32761bf468551 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/35ed9744b44a4213bad32761bf468551 2024-12-12T16:30:02,482 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/c7c026f0f39d4902af2107f82b7de86f to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/c7c026f0f39d4902af2107f82b7de86f 2024-12-12T16:30:02,482 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/df908cbd137d45f2914985cb2f10006a to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/df908cbd137d45f2914985cb2f10006a 2024-12-12T16:30:02,483 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/5c8a2cf94be5477a8abc66d9db0eca8c to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/5c8a2cf94be5477a8abc66d9db0eca8c 2024-12-12T16:30:02,483 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/16c8847be2a6448e90c9dccc4425b299 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/16c8847be2a6448e90c9dccc4425b299 2024-12-12T16:30:02,483 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/cae37e331f124bb686c022dabd68a526 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/cae37e331f124bb686c022dabd68a526 2024-12-12T16:30:02,484 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/6c0b529732284234bc8883b84084db6b to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/6c0b529732284234bc8883b84084db6b 2024-12-12T16:30:02,484 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/cd06d18fbe2648cca6ac7fe960a70ea2 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/cd06d18fbe2648cca6ac7fe960a70ea2 2024-12-12T16:30:02,484 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/10bcae9a186246de8b14d1f027fa8789 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/10bcae9a186246de8b14d1f027fa8789 2024-12-12T16:30:02,484 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/9f07e4552b304c14913a7533101e02fa to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/9f07e4552b304c14913a7533101e02fa 2024-12-12T16:30:02,484 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/5dd82d2fa8b54eb3b31028b843609824 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/5dd82d2fa8b54eb3b31028b843609824 2024-12-12T16:30:02,485 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/1c3c4d4ebc004bf0a20027fde904bccb to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/1c3c4d4ebc004bf0a20027fde904bccb 2024-12-12T16:30:02,485 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/aa8f5633e2e642108f25bb1814c8210a to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/aa8f5633e2e642108f25bb1814c8210a 2024-12-12T16:30:02,485 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/bb466c7a44a5465e8f3507b995d39ef7 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/bb466c7a44a5465e8f3507b995d39ef7 2024-12-12T16:30:02,485 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/0062b34c475d47ab8c832e1090ac846b to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/0062b34c475d47ab8c832e1090ac846b 2024-12-12T16:30:02,486 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/83935cf491d64ce4afbba89756bd4295 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/83935cf491d64ce4afbba89756bd4295 2024-12-12T16:30:02,487 DEBUG [StoreCloser-TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/8ea123623d854fabb750f70bad5bc6e9, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/269d6a6beeb949308494961a577e9d23, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/151682da3b014cd5b3f5e8dbca5f8ba1, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/28340b73d061408cb579aa75ca1f353c, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/52a33969a9094e9bad075fa208c29051, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/4ff5ad155f1240089773022512fe2ef9, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/dee4f96e33e54cb09ffd20095be0abe2, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/01382bac8ccc47aab08d79016f36feb6, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/018267f7467f4b86814076139ad04ee6, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/374ea1dd6dfb492f94cca00d88517a7a, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/80b934779eb34133a598dd8d5186f932, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/bab55973620a4fbe86b1729e25768e31, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/3b8b55346e884ca7b3d70653c95d6c2f, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/cc51d57a88c24064ad658584757f91b6, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/01a7d3b3eb1d494a8fca6282d14fe067, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/863fdf10abca478788a7dfb9d91e3572, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/88df406208f24090a14f19f7a240d3fb, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/ee3593518ab24d149aae175f3a38e7c3, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/6450c807d5274138a897697ec5b6d47e, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/5ae5b40db68a4b05ba4d7170f65158a0, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/c4f59680a7f843e980cda773ccda3699, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/c12140afd7214212bf2ecbb50709bf69, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/2a3a01ffb54a4860ab643d260865703f, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/624c6b36593048cb865d93d5cc9c9c75, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/2e3ba2dd73a6401e928d07fbaef0f777, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/b11ac20c26134524a43d4b0e4c9bf65e] to archive 2024-12-12T16:30:02,488 DEBUG [StoreCloser-TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-12T16:30:02,490 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/8ea123623d854fabb750f70bad5bc6e9 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/8ea123623d854fabb750f70bad5bc6e9 2024-12-12T16:30:02,490 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/dee4f96e33e54cb09ffd20095be0abe2 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/dee4f96e33e54cb09ffd20095be0abe2 2024-12-12T16:30:02,490 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/01382bac8ccc47aab08d79016f36feb6 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/01382bac8ccc47aab08d79016f36feb6 2024-12-12T16:30:02,490 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/52a33969a9094e9bad075fa208c29051 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/52a33969a9094e9bad075fa208c29051 2024-12-12T16:30:02,490 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/269d6a6beeb949308494961a577e9d23 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/269d6a6beeb949308494961a577e9d23 2024-12-12T16:30:02,490 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/28340b73d061408cb579aa75ca1f353c to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/28340b73d061408cb579aa75ca1f353c 2024-12-12T16:30:02,491 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/4ff5ad155f1240089773022512fe2ef9 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/4ff5ad155f1240089773022512fe2ef9 2024-12-12T16:30:02,491 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/151682da3b014cd5b3f5e8dbca5f8ba1 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/151682da3b014cd5b3f5e8dbca5f8ba1 2024-12-12T16:30:02,492 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/80b934779eb34133a598dd8d5186f932 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/80b934779eb34133a598dd8d5186f932 2024-12-12T16:30:02,492 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/018267f7467f4b86814076139ad04ee6 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/018267f7467f4b86814076139ad04ee6 2024-12-12T16:30:02,492 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/374ea1dd6dfb492f94cca00d88517a7a to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/374ea1dd6dfb492f94cca00d88517a7a 2024-12-12T16:30:02,492 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/3b8b55346e884ca7b3d70653c95d6c2f to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/3b8b55346e884ca7b3d70653c95d6c2f 2024-12-12T16:30:02,492 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/bab55973620a4fbe86b1729e25768e31 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/bab55973620a4fbe86b1729e25768e31 2024-12-12T16:30:02,493 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/cc51d57a88c24064ad658584757f91b6 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/cc51d57a88c24064ad658584757f91b6 2024-12-12T16:30:02,493 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/01a7d3b3eb1d494a8fca6282d14fe067 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/01a7d3b3eb1d494a8fca6282d14fe067 2024-12-12T16:30:02,494 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/88df406208f24090a14f19f7a240d3fb to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/88df406208f24090a14f19f7a240d3fb 2024-12-12T16:30:02,494 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/863fdf10abca478788a7dfb9d91e3572 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/863fdf10abca478788a7dfb9d91e3572 2024-12-12T16:30:02,494 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/ee3593518ab24d149aae175f3a38e7c3 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/ee3593518ab24d149aae175f3a38e7c3 2024-12-12T16:30:02,494 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/5ae5b40db68a4b05ba4d7170f65158a0 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/5ae5b40db68a4b05ba4d7170f65158a0 2024-12-12T16:30:02,495 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/c4f59680a7f843e980cda773ccda3699 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/c4f59680a7f843e980cda773ccda3699 2024-12-12T16:30:02,495 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/6450c807d5274138a897697ec5b6d47e to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/6450c807d5274138a897697ec5b6d47e 2024-12-12T16:30:02,495 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/c12140afd7214212bf2ecbb50709bf69 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/c12140afd7214212bf2ecbb50709bf69 2024-12-12T16:30:02,495 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/2a3a01ffb54a4860ab643d260865703f to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/2a3a01ffb54a4860ab643d260865703f 2024-12-12T16:30:02,495 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/624c6b36593048cb865d93d5cc9c9c75 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/624c6b36593048cb865d93d5cc9c9c75 2024-12-12T16:30:02,495 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/2e3ba2dd73a6401e928d07fbaef0f777 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/2e3ba2dd73a6401e928d07fbaef0f777 2024-12-12T16:30:02,496 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/b11ac20c26134524a43d4b0e4c9bf65e to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/b11ac20c26134524a43d4b0e4c9bf65e 2024-12-12T16:30:02,499 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] wal.WALSplitUtil(409): Wrote 
file=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/recovered.edits/387.seqid, newMaxSeqId=387, maxSeqId=4 2024-12-12T16:30:02,500 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912. 2024-12-12T16:30:02,500 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1635): Region close journal for 8e89f008dc04dbad786e718ad49c4912: 2024-12-12T16:30:02,501 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] handler.UnassignRegionHandler(170): Closed 8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:30:02,502 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=177 updating hbase:meta row=8e89f008dc04dbad786e718ad49c4912, regionState=CLOSED 2024-12-12T16:30:02,504 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=178, resume processing ppid=177 2024-12-12T16:30:02,504 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=178, ppid=177, state=SUCCESS; CloseRegionProcedure 8e89f008dc04dbad786e718ad49c4912, server=4f6a4780a2f6,41933,1734020809476 in 1.4380 sec 2024-12-12T16:30:02,505 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=177, resume processing ppid=176 2024-12-12T16:30:02,505 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=177, ppid=176, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=8e89f008dc04dbad786e718ad49c4912, UNASSIGN in 1.4410 sec 2024-12-12T16:30:02,506 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=176, resume processing ppid=175 2024-12-12T16:30:02,506 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=176, ppid=175, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.4430 sec 2024-12-12T16:30:02,507 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734021002507"}]},"ts":"1734021002507"} 2024-12-12T16:30:02,508 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-12T16:30:02,510 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-12T16:30:02,511 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=175, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.4530 sec 2024-12-12T16:30:03,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-12T16:30:03,162 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 175 completed 2024-12-12T16:30:03,163 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-12T16:30:03,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] procedure2.ProcedureExecutor(1098): Stored pid=179, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T16:30:03,164 DEBUG [PEWorker-1 {}] 
procedure.DeleteTableProcedure(103): Waiting for RIT for pid=179, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T16:30:03,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-12T16:30:03,165 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=179, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T16:30:03,167 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:30:03,168 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A, FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B, FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C, FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/recovered.edits] 2024-12-12T16:30:03,171 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/6463a61a1e034508b20c264af793c5f1 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/6463a61a1e034508b20c264af793c5f1 2024-12-12T16:30:03,171 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/9812e6574b004a16a82e711a8b9baa6b to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/A/9812e6574b004a16a82e711a8b9baa6b 2024-12-12T16:30:03,174 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/49890075daa34c82b861ece793c98ba7 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/49890075daa34c82b861ece793c98ba7 2024-12-12T16:30:03,174 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/7f496cec9a194b1abb97013e29736934 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/7f496cec9a194b1abb97013e29736934 2024-12-12T16:30:03,175 DEBUG [HFileArchiver-10 
{}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/f10bfef911fb466fa34e4944ffb7531c to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/f10bfef911fb466fa34e4944ffb7531c 2024-12-12T16:30:03,175 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/f3bcefd76f0244ad9dd47800a45615fa to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/C/f3bcefd76f0244ad9dd47800a45615fa 2024-12-12T16:30:03,178 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/recovered.edits/387.seqid to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/recovered.edits/387.seqid 2024-12-12T16:30:03,178 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:30:03,178 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-12T16:30:03,178 DEBUG [PEWorker-1 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-12T16:30:03,179 DEBUG [PEWorker-1 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-12T16:30:03,185 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412120304b98f4a774b2aa07acc31803eceab_8e89f008dc04dbad786e718ad49c4912 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412120304b98f4a774b2aa07acc31803eceab_8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:30:03,185 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212121c4ef367884d2bb57892ede3b8fa10_8e89f008dc04dbad786e718ad49c4912 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212121c4ef367884d2bb57892ede3b8fa10_8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:30:03,185 DEBUG [HFileArchiver-10 
{}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412123b1288a21cc348f787422d0215668a9a_8e89f008dc04dbad786e718ad49c4912 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412123b1288a21cc348f787422d0215668a9a_8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:30:03,185 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412124114228b49174eee9ebb8e35b8156e3e_8e89f008dc04dbad786e718ad49c4912 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412124114228b49174eee9ebb8e35b8156e3e_8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:30:03,185 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121210bfd24428044411acc5a418de51b34e_8e89f008dc04dbad786e718ad49c4912 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121210bfd24428044411acc5a418de51b34e_8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:30:03,185 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412120c545e92852546a19c9a7c540dac65be_8e89f008dc04dbad786e718ad49c4912 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412120c545e92852546a19c9a7c540dac65be_8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:30:03,185 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121249ecd3516a204b589db131139d6e942e_8e89f008dc04dbad786e718ad49c4912 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121249ecd3516a204b589db131139d6e942e_8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:30:03,186 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212650ab28de9ab4da19b2c0b2b501f2bdd_8e89f008dc04dbad786e718ad49c4912 to 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212650ab28de9ab4da19b2c0b2b501f2bdd_8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:30:03,186 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412127f536cda3b9c466fae559322d0f9d52e_8e89f008dc04dbad786e718ad49c4912 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412127f536cda3b9c466fae559322d0f9d52e_8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:30:03,187 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412126fb574a4569845ae94a5cc3072c3e38e_8e89f008dc04dbad786e718ad49c4912 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412126fb574a4569845ae94a5cc3072c3e38e_8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:30:03,187 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212838130139f6447499b4fbafdbdec3664_8e89f008dc04dbad786e718ad49c4912 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212838130139f6447499b4fbafdbdec3664_8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:30:03,187 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121282437295296645b5872bee69075efbc2_8e89f008dc04dbad786e718ad49c4912 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121282437295296645b5872bee69075efbc2_8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:30:03,187 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412126ae32fb6d53d446f8e25c2d476503225_8e89f008dc04dbad786e718ad49c4912 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412126ae32fb6d53d446f8e25c2d476503225_8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:30:03,187 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212876076b7e7e04cb7ac15c2987b396cec_8e89f008dc04dbad786e718ad49c4912 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212876076b7e7e04cb7ac15c2987b396cec_8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:30:03,187 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212899d48a263994db28e5cd58cc49a1fb5_8e89f008dc04dbad786e718ad49c4912 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212899d48a263994db28e5cd58cc49a1fb5_8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:30:03,187 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412126897636af43f4f49b5ae5ea5ba35ac65_8e89f008dc04dbad786e718ad49c4912 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412126897636af43f4f49b5ae5ea5ba35ac65_8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:30:03,188 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212973bf18f655842fba491afe9a69a6da6_8e89f008dc04dbad786e718ad49c4912 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212973bf18f655842fba491afe9a69a6da6_8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:30:03,188 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412129f750deb8c2a462aa304e84ecbf71624_8e89f008dc04dbad786e718ad49c4912 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412129f750deb8c2a462aa304e84ecbf71624_8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:30:03,188 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212b85ff5ba5a7d4ab2894eae08b9ac988d_8e89f008dc04dbad786e718ad49c4912 to 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212b85ff5ba5a7d4ab2894eae08b9ac988d_8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:30:03,188 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212bc4fdc6ffa2744b7acc0c9b73180c30a_8e89f008dc04dbad786e718ad49c4912 to hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212bc4fdc6ffa2744b7acc0c9b73180c30a_8e89f008dc04dbad786e718ad49c4912 2024-12-12T16:30:03,189 DEBUG [PEWorker-1 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-12T16:30:03,191 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=179, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T16:30:03,192 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-12T16:30:03,194 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-12T16:30:03,195 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=179, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T16:30:03,195 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 2024-12-12T16:30:03,195 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734021003195"}]},"ts":"9223372036854775807"} 2024-12-12T16:30:03,196 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-12T16:30:03,196 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 8e89f008dc04dbad786e718ad49c4912, NAME => 'TestAcidGuarantees,,1734020974503.8e89f008dc04dbad786e718ad49c4912.', STARTKEY => '', ENDKEY => ''}] 2024-12-12T16:30:03,196 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 
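Editor's note: every HFileArchiver record above follows a single pattern — each store file (and MOB file) is moved from the region's location under data/ to the mirror location under archive/, keeping the same table/region/family/file suffix. The sketch below illustrates only that path mapping with the plain Hadoop FileSystem API; the class and method names (ArchivePathSketch, toArchivePath, archiveStoreFile) and the rootDir argument are hypothetical, and this is not the real org.apache.hadoop.hbase.backup.HFileArchiver, which additionally handles name collisions, retries, and directory cleanup.

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative sketch only -- not the real org.apache.hadoop.hbase.backup.HFileArchiver.
public class ArchivePathSketch {

  // Map a store file under <root>/data/... to the mirror location under <root>/archive/data/...
  static Path toArchivePath(Path rootDir, Path storeFile) {
    String root = rootDir.toUri().getPath();
    String relative = storeFile.toUri().getPath().substring(root.length() + 1);
    return new Path(new Path(rootDir, "archive"), relative);
  }

  static void archiveStoreFile(Configuration conf, Path rootDir, Path storeFile)
      throws IOException {
    FileSystem fs = FileSystem.get(rootDir.toUri(), conf);
    Path target = toArchivePath(rootDir, storeFile);
    fs.mkdirs(target.getParent());        // ensure archive/<table>/<region>/<family>/ exists
    if (!fs.rename(storeFile, target)) {  // a plain rename stands in for the archiver's move
      throw new IOException("Failed to archive " + storeFile + " to " + target);
    }
  }
}
```

Applied to the files above, this maps, for example, <root>/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/eb87795c7e204c6eb70a48178eb3ccf6 to <root>/archive/data/default/TestAcidGuarantees/8e89f008dc04dbad786e718ad49c4912/B/eb87795c7e204c6eb70a48178eb3ccf6 — exactly the source/target pair each "Archived from" record logs.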
2024-12-12T16:30:03,196 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734021003196"}]},"ts":"9223372036854775807"} 2024-12-12T16:30:03,197 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-12T16:30:03,199 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=179, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T16:30:03,200 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=179, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 36 msec 2024-12-12T16:30:03,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33187 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-12T16:30:03,266 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 179 completed 2024-12-12T16:30:03,276 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=245 (was 244) - Thread LEAK? -, OpenFileDescriptor=448 (was 445) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=378 (was 377) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=7596 (was 7644) 2024-12-12T16:30:03,276 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-12T16:30:03,276 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-12T16:30:03,276 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5e83c466 to 127.0.0.1:52684 2024-12-12T16:30:03,276 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:30:03,276 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-12T16:30:03,276 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=634853251, stopped=false 2024-12-12T16:30:03,276 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=4f6a4780a2f6,33187,1734020808717 2024-12-12T16:30:03,278 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33187-0x100870d75a70000, quorum=127.0.0.1:52684, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-12T16:30:03,278 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41933-0x100870d75a70001, quorum=127.0.0.1:52684, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-12T16:30:03,278 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-12T16:30:03,278 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41933-0x100870d75a70001, quorum=127.0.0.1:52684, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T16:30:03,278 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33187-0x100870d75a70000, quorum=127.0.0.1:52684, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T16:30:03,279 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): 
Stopping rpc client 2024-12-12T16:30:03,279 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '4f6a4780a2f6,41933,1734020809476' ***** 2024-12-12T16:30:03,279 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-12T16:30:03,279 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33187-0x100870d75a70000, quorum=127.0.0.1:52684, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-12T16:30:03,280 INFO [RS:0;4f6a4780a2f6:41933 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-12T16:30:03,280 INFO [RS:0;4f6a4780a2f6:41933 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-12T16:30:03,280 INFO [RS:0;4f6a4780a2f6:41933 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-12T16:30:03,280 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-12T16:30:03,280 INFO [RS:0;4f6a4780a2f6:41933 {}] regionserver.HRegionServer(3579): Received CLOSE for f489058ee189c52324fddaaf21558958 2024-12-12T16:30:03,280 INFO [RS:0;4f6a4780a2f6:41933 {}] regionserver.HRegionServer(1224): stopping server 4f6a4780a2f6,41933,1734020809476 2024-12-12T16:30:03,281 DEBUG [RS:0;4f6a4780a2f6:41933 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:30:03,281 INFO [RS:0;4f6a4780a2f6:41933 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-12T16:30:03,281 INFO [RS:0;4f6a4780a2f6:41933 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-12T16:30:03,281 INFO [RS:0;4f6a4780a2f6:41933 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-12T16:30:03,281 INFO [RS:0;4f6a4780a2f6:41933 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-12T16:30:03,281 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing f489058ee189c52324fddaaf21558958, disabling compactions & flushes 2024-12-12T16:30:03,281 INFO [RS:0;4f6a4780a2f6:41933 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-12-12T16:30:03,281 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1734020813467.f489058ee189c52324fddaaf21558958. 2024-12-12T16:30:03,281 DEBUG [RS:0;4f6a4780a2f6:41933 {}] regionserver.HRegionServer(1603): Online Regions={f489058ee189c52324fddaaf21558958=hbase:namespace,,1734020813467.f489058ee189c52324fddaaf21558958., 1588230740=hbase:meta,,1.1588230740} 2024-12-12T16:30:03,281 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1734020813467.f489058ee189c52324fddaaf21558958. 2024-12-12T16:30:03,281 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1734020813467.f489058ee189c52324fddaaf21558958. after waiting 0 ms 2024-12-12T16:30:03,281 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1734020813467.f489058ee189c52324fddaaf21558958. 
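The pid=179 DeleteTableProcedure completion recorded above is the server side of an ordinary client-initiated table delete. For orientation only, a minimal client-side sketch (assuming a standard HBase 2.x Connection; this is not taken from the TestAcidGuarantees source) would look roughly like this:

// Illustrative sketch only: the kind of client call whose server-side effect is the
// DeleteTableProcedure trace above. Configuration handling here is an assumption.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DeleteTestTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("TestAcidGuarantees");
      if (admin.tableExists(tn)) {
        if (admin.isTableEnabled(tn)) {
          admin.disableTable(tn); // regions are unassigned before deletion
        }
        // Master runs a DeleteTableProcedure; store and MOB files are moved to the
        // archive directory rather than removed in place, as the HFileArchiver
        // entries above show.
        admin.deleteTable(tn);
      }
    }
  }
}

The HFileArchiver lines above, relocating files under .../archive/data/default/TestAcidGuarantees/..., are consistent with that archive-on-delete behavior.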
2024-12-12T16:30:03,281 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing f489058ee189c52324fddaaf21558958 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-12T16:30:03,281 DEBUG [RS_CLOSE_META-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-12T16:30:03,281 INFO [RS_CLOSE_META-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-12T16:30:03,281 DEBUG [RS_CLOSE_META-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-12T16:30:03,281 DEBUG [RS_CLOSE_META-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-12T16:30:03,281 DEBUG [RS_CLOSE_META-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-12T16:30:03,281 INFO [RS_CLOSE_META-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=20.55 KB heapSize=35.87 KB 2024-12-12T16:30:03,282 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41933-0x100870d75a70001, quorum=127.0.0.1:52684, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-12T16:30:03,285 DEBUG [RS:0;4f6a4780a2f6:41933 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, f489058ee189c52324fddaaf21558958 2024-12-12T16:30:03,301 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/hbase/namespace/f489058ee189c52324fddaaf21558958/.tmp/info/ed905dd2090843ada5566452184cdf39 is 45, key is default/info:d/1734020814423/Put/seqid=0 2024-12-12T16:30:03,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742485_1661 (size=5037) 2024-12-12T16:30:03,316 DEBUG [RS_CLOSE_META-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/hbase/meta/1588230740/.tmp/info/17c229deeb234ddbbcd449ee004943f0 is 143, key is hbase:namespace,,1734020813467.f489058ee189c52324fddaaf21558958./info:regioninfo/1734020814302/Put/seqid=0 2024-12-12T16:30:03,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742486_1662 (size=7725) 2024-12-12T16:30:03,328 INFO [regionserver/4f6a4780a2f6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-12T16:30:03,485 DEBUG [RS:0;4f6a4780a2f6:41933 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, f489058ee189c52324fddaaf21558958 2024-12-12T16:30:03,685 DEBUG [RS:0;4f6a4780a2f6:41933 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, f489058ee189c52324fddaaf21558958 2024-12-12T16:30:03,705 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 
(bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/hbase/namespace/f489058ee189c52324fddaaf21558958/.tmp/info/ed905dd2090843ada5566452184cdf39 2024-12-12T16:30:03,708 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/hbase/namespace/f489058ee189c52324fddaaf21558958/.tmp/info/ed905dd2090843ada5566452184cdf39 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/hbase/namespace/f489058ee189c52324fddaaf21558958/info/ed905dd2090843ada5566452184cdf39 2024-12-12T16:30:03,711 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/hbase/namespace/f489058ee189c52324fddaaf21558958/info/ed905dd2090843ada5566452184cdf39, entries=2, sequenceid=6, filesize=4.9 K 2024-12-12T16:30:03,712 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for f489058ee189c52324fddaaf21558958 in 431ms, sequenceid=6, compaction requested=false 2024-12-12T16:30:03,715 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/hbase/namespace/f489058ee189c52324fddaaf21558958/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-12T16:30:03,715 INFO [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1734020813467.f489058ee189c52324fddaaf21558958. 2024-12-12T16:30:03,715 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for f489058ee189c52324fddaaf21558958: 2024-12-12T16:30:03,715 DEBUG [RS_CLOSE_REGION-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1734020813467.f489058ee189c52324fddaaf21558958. 
2024-12-12T16:30:03,720 INFO [RS_CLOSE_META-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/hbase/meta/1588230740/.tmp/info/17c229deeb234ddbbcd449ee004943f0 2024-12-12T16:30:03,737 DEBUG [RS_CLOSE_META-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/hbase/meta/1588230740/.tmp/rep_barrier/5420ea41cb024911abc174f904c65c35 is 102, key is TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8./rep_barrier:/1734020848625/DeleteFamily/seqid=0 2024-12-12T16:30:03,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742487_1663 (size=6025) 2024-12-12T16:30:03,885 DEBUG [RS:0;4f6a4780a2f6:41933 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-12T16:30:04,086 DEBUG [RS:0;4f6a4780a2f6:41933 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-12T16:30:04,123 INFO [regionserver/4f6a4780a2f6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-12T16:30:04,123 INFO [regionserver/4f6a4780a2f6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-12T16:30:04,141 INFO [RS_CLOSE_META-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=588 B at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/hbase/meta/1588230740/.tmp/rep_barrier/5420ea41cb024911abc174f904c65c35 2024-12-12T16:30:04,159 DEBUG [RS_CLOSE_META-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/hbase/meta/1588230740/.tmp/table/644ec64b61514349887bb3ac9cf43536 is 96, key is TestAcidGuarantees,,1734020814636.b22602467dd4e6c94f26649b7855f8e8./table:/1734020848625/DeleteFamily/seqid=0 2024-12-12T16:30:04,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742488_1664 (size=5942) 2024-12-12T16:30:04,162 INFO [RS_CLOSE_META-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.08 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/hbase/meta/1588230740/.tmp/table/644ec64b61514349887bb3ac9cf43536 2024-12-12T16:30:04,165 DEBUG [RS_CLOSE_META-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/hbase/meta/1588230740/.tmp/info/17c229deeb234ddbbcd449ee004943f0 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/hbase/meta/1588230740/info/17c229deeb234ddbbcd449ee004943f0 2024-12-12T16:30:04,167 INFO [RS_CLOSE_META-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/hbase/meta/1588230740/info/17c229deeb234ddbbcd449ee004943f0, entries=22, sequenceid=93, filesize=7.5 K 2024-12-12T16:30:04,168 DEBUG [RS_CLOSE_META-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/hbase/meta/1588230740/.tmp/rep_barrier/5420ea41cb024911abc174f904c65c35 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/hbase/meta/1588230740/rep_barrier/5420ea41cb024911abc174f904c65c35 2024-12-12T16:30:04,171 INFO [RS_CLOSE_META-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/hbase/meta/1588230740/rep_barrier/5420ea41cb024911abc174f904c65c35, entries=6, sequenceid=93, filesize=5.9 K 2024-12-12T16:30:04,171 DEBUG [RS_CLOSE_META-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/hbase/meta/1588230740/.tmp/table/644ec64b61514349887bb3ac9cf43536 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/hbase/meta/1588230740/table/644ec64b61514349887bb3ac9cf43536 2024-12-12T16:30:04,173 INFO [RS_CLOSE_META-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/hbase/meta/1588230740/table/644ec64b61514349887bb3ac9cf43536, entries=9, sequenceid=93, filesize=5.8 K 2024-12-12T16:30:04,174 INFO [RS_CLOSE_META-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~20.55 KB/21040, heapSize ~35.82 KB/36680, currentSize=0 B/0 for 1588230740 in 893ms, sequenceid=93, compaction requested=false 2024-12-12T16:30:04,177 DEBUG [RS_CLOSE_META-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/data/hbase/meta/1588230740/recovered.edits/96.seqid, newMaxSeqId=96, maxSeqId=1 2024-12-12T16:30:04,178 DEBUG [RS_CLOSE_META-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-12T16:30:04,178 INFO [RS_CLOSE_META-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-12T16:30:04,178 DEBUG [RS_CLOSE_META-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-12T16:30:04,178 DEBUG [RS_CLOSE_META-regionserver/4f6a4780a2f6:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-12T16:30:04,286 INFO [RS:0;4f6a4780a2f6:41933 {}] regionserver.HRegionServer(1250): stopping server 4f6a4780a2f6,41933,1734020809476; all regions closed. 
2024-12-12T16:30:04,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741834_1010 (size=26050) 2024-12-12T16:30:04,291 DEBUG [RS:0;4f6a4780a2f6:41933 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/oldWALs 2024-12-12T16:30:04,291 INFO [RS:0;4f6a4780a2f6:41933 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 4f6a4780a2f6%2C41933%2C1734020809476.meta:.meta(num 1734020813212) 2024-12-12T16:30:04,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741832_1008 (size=15294813) 2024-12-12T16:30:04,295 DEBUG [RS:0;4f6a4780a2f6:41933 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/oldWALs 2024-12-12T16:30:04,295 INFO [RS:0;4f6a4780a2f6:41933 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 4f6a4780a2f6%2C41933%2C1734020809476:(num 1734020812254) 2024-12-12T16:30:04,295 DEBUG [RS:0;4f6a4780a2f6:41933 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:30:04,295 INFO [RS:0;4f6a4780a2f6:41933 {}] regionserver.LeaseManager(133): Closed leases 2024-12-12T16:30:04,295 INFO [RS:0;4f6a4780a2f6:41933 {}] hbase.ChoreService(370): Chore service for: regionserver/4f6a4780a2f6:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown 2024-12-12T16:30:04,295 INFO [regionserver/4f6a4780a2f6:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-12T16:30:04,296 INFO [RS:0;4f6a4780a2f6:41933 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:41933 2024-12-12T16:30:04,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33187-0x100870d75a70000, quorum=127.0.0.1:52684, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-12T16:30:04,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41933-0x100870d75a70001, quorum=127.0.0.1:52684, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/4f6a4780a2f6,41933,1734020809476 2024-12-12T16:30:04,301 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [4f6a4780a2f6,41933,1734020809476] 2024-12-12T16:30:04,301 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 4f6a4780a2f6,41933,1734020809476; numProcessing=1 2024-12-12T16:30:04,303 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/4f6a4780a2f6,41933,1734020809476 already deleted, retry=false 2024-12-12T16:30:04,303 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 4f6a4780a2f6,41933,1734020809476 expired; onlineServers=0 2024-12-12T16:30:04,303 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '4f6a4780a2f6,33187,1734020808717' ***** 2024-12-12T16:30:04,303 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-12T16:30:04,303 DEBUG [M:0;4f6a4780a2f6:33187 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b81c190, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=4f6a4780a2f6/172.17.0.2:0 2024-12-12T16:30:04,303 INFO [M:0;4f6a4780a2f6:33187 {}] regionserver.HRegionServer(1224): stopping server 4f6a4780a2f6,33187,1734020808717 2024-12-12T16:30:04,303 INFO [M:0;4f6a4780a2f6:33187 {}] regionserver.HRegionServer(1250): stopping server 4f6a4780a2f6,33187,1734020808717; all regions closed. 2024-12-12T16:30:04,303 DEBUG [M:0;4f6a4780a2f6:33187 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T16:30:04,303 DEBUG [M:0;4f6a4780a2f6:33187 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-12T16:30:04,303 DEBUG [M:0;4f6a4780a2f6:33187 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-12T16:30:04,304 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-12-12T16:30:04,304 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster-HFileCleaner.large.0-1734020811974 {}] cleaner.HFileCleaner(306): Exit Thread[master/4f6a4780a2f6:0:becomeActiveMaster-HFileCleaner.large.0-1734020811974,5,FailOnTimeoutGroup] 2024-12-12T16:30:04,304 DEBUG [master/4f6a4780a2f6:0:becomeActiveMaster-HFileCleaner.small.0-1734020811974 {}] cleaner.HFileCleaner(306): Exit Thread[master/4f6a4780a2f6:0:becomeActiveMaster-HFileCleaner.small.0-1734020811974,5,FailOnTimeoutGroup] 2024-12-12T16:30:04,304 INFO [M:0;4f6a4780a2f6:33187 {}] hbase.ChoreService(370): Chore service for: master/4f6a4780a2f6:0 had [] on shutdown 2024-12-12T16:30:04,304 DEBUG [M:0;4f6a4780a2f6:33187 {}] master.HMaster(1733): Stopping service threads 2024-12-12T16:30:04,304 INFO [M:0;4f6a4780a2f6:33187 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-12T16:30:04,304 ERROR [M:0;4f6a4780a2f6:33187 {}] procedure2.ProcedureExecutor(722): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10] Thread[IPC Client (59733779) connection to localhost/127.0.0.1:45065 from jenkins,5,PEWorkerGroup] Thread[IPC Parameter Sending Thread for localhost/127.0.0.1:45065,5,PEWorkerGroup] 2024-12-12T16:30:04,305 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33187-0x100870d75a70000, quorum=127.0.0.1:52684, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-12T16:30:04,305 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33187-0x100870d75a70000, quorum=127.0.0.1:52684, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T16:30:04,305 INFO [M:0;4f6a4780a2f6:33187 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-12T16:30:04,305 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33187-0x100870d75a70000, quorum=127.0.0.1:52684, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-12T16:30:04,305 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-12-12T16:30:04,306 DEBUG [M:0;4f6a4780a2f6:33187 {}] zookeeper.ZKUtil(347): master:33187-0x100870d75a70000, quorum=127.0.0.1:52684, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-12T16:30:04,306 WARN [M:0;4f6a4780a2f6:33187 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-12T16:30:04,306 INFO [M:0;4f6a4780a2f6:33187 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-12T16:30:04,306 INFO [M:0;4f6a4780a2f6:33187 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-12T16:30:04,306 DEBUG [M:0;4f6a4780a2f6:33187 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-12T16:30:04,306 INFO [M:0;4f6a4780a2f6:33187 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-12T16:30:04,306 DEBUG [M:0;4f6a4780a2f6:33187 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-12T16:30:04,306 DEBUG [M:0;4f6a4780a2f6:33187 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-12T16:30:04,306 DEBUG [M:0;4f6a4780a2f6:33187 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-12T16:30:04,306 INFO [M:0;4f6a4780a2f6:33187 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=753.72 KB heapSize=925.45 KB 2024-12-12T16:30:04,322 DEBUG [M:0;4f6a4780a2f6:33187 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/03ddf111cffd497295ea892d2b8c5cd5 is 82, key is hbase:meta,,1/info:regioninfo/1734020813350/Put/seqid=0 2024-12-12T16:30:04,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742489_1665 (size=5672) 2024-12-12T16:30:04,402 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41933-0x100870d75a70001, quorum=127.0.0.1:52684, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-12T16:30:04,402 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41933-0x100870d75a70001, quorum=127.0.0.1:52684, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-12T16:30:04,402 INFO [RS:0;4f6a4780a2f6:41933 {}] regionserver.HRegionServer(1307): Exiting; stopping=4f6a4780a2f6,41933,1734020809476; zookeeper connection closed. 
2024-12-12T16:30:04,403 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1f1f20cc {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1f1f20cc 2024-12-12T16:30:04,403 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-12T16:30:04,730 INFO [M:0;4f6a4780a2f6:33187 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=2111 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/03ddf111cffd497295ea892d2b8c5cd5 2024-12-12T16:30:04,751 DEBUG [M:0;4f6a4780a2f6:33187 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3fd6c846e1f74fd4a4a0dfe841aecd55 is 2284, key is \x00\x00\x00\x00\x00\x00\x00\x9C/proc:d/1734020977518/Put/seqid=0 2024-12-12T16:30:04,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742490_1666 (size=45052) 2024-12-12T16:30:05,156 INFO [M:0;4f6a4780a2f6:33187 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=753.17 KB at sequenceid=2111 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3fd6c846e1f74fd4a4a0dfe841aecd55 2024-12-12T16:30:05,160 INFO [M:0;4f6a4780a2f6:33187 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 3fd6c846e1f74fd4a4a0dfe841aecd55 2024-12-12T16:30:05,175 DEBUG [M:0;4f6a4780a2f6:33187 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/103b9965e12243fe84557677d9e62251 is 69, key is 4f6a4780a2f6,41933,1734020809476/rs:state/1734020812010/Put/seqid=0 2024-12-12T16:30:05,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073742491_1667 (size=5156) 2024-12-12T16:30:05,579 INFO [M:0;4f6a4780a2f6:33187 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=2111 (bloomFilter=true), to=hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/103b9965e12243fe84557677d9e62251 2024-12-12T16:30:05,583 DEBUG [M:0;4f6a4780a2f6:33187 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/03ddf111cffd497295ea892d2b8c5cd5 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/03ddf111cffd497295ea892d2b8c5cd5 2024-12-12T16:30:05,586 INFO [M:0;4f6a4780a2f6:33187 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/03ddf111cffd497295ea892d2b8c5cd5, entries=8, sequenceid=2111, filesize=5.5 K 
2024-12-12T16:30:05,586 DEBUG [M:0;4f6a4780a2f6:33187 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3fd6c846e1f74fd4a4a0dfe841aecd55 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/3fd6c846e1f74fd4a4a0dfe841aecd55 2024-12-12T16:30:05,589 INFO [M:0;4f6a4780a2f6:33187 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 3fd6c846e1f74fd4a4a0dfe841aecd55 2024-12-12T16:30:05,589 INFO [M:0;4f6a4780a2f6:33187 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/3fd6c846e1f74fd4a4a0dfe841aecd55, entries=179, sequenceid=2111, filesize=44.0 K 2024-12-12T16:30:05,590 DEBUG [M:0;4f6a4780a2f6:33187 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/103b9965e12243fe84557677d9e62251 as hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/103b9965e12243fe84557677d9e62251 2024-12-12T16:30:05,592 INFO [M:0;4f6a4780a2f6:33187 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:45065/user/jenkins/test-data/033d2d2b-f6d7-1080-edd2-00dd374a03bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/103b9965e12243fe84557677d9e62251, entries=1, sequenceid=2111, filesize=5.0 K 2024-12-12T16:30:05,593 INFO [M:0;4f6a4780a2f6:33187 {}] regionserver.HRegion(3040): Finished flush of dataSize ~753.72 KB/771812, heapSize ~925.16 KB/947360, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1287ms, sequenceid=2111, compaction requested=false 2024-12-12T16:30:05,594 INFO [M:0;4f6a4780a2f6:33187 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-12T16:30:05,594 DEBUG [M:0;4f6a4780a2f6:33187 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-12T16:30:05,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43013 is added to blk_1073741830_1006 (size=910266) 2024-12-12T16:30:05,596 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-12T16:30:05,596 INFO [M:0;4f6a4780a2f6:33187 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 
2024-12-12T16:30:05,597 INFO [M:0;4f6a4780a2f6:33187 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:33187 2024-12-12T16:30:05,598 DEBUG [M:0;4f6a4780a2f6:33187 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/4f6a4780a2f6,33187,1734020808717 already deleted, retry=false 2024-12-12T16:30:05,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33187-0x100870d75a70000, quorum=127.0.0.1:52684, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-12T16:30:05,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33187-0x100870d75a70000, quorum=127.0.0.1:52684, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-12T16:30:05,700 INFO [M:0;4f6a4780a2f6:33187 {}] regionserver.HRegionServer(1307): Exiting; stopping=4f6a4780a2f6,33187,1734020808717; zookeeper connection closed. 2024-12-12T16:30:05,705 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1f79ec76{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-12T16:30:05,708 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@576ebda6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-12T16:30:05,708 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-12T16:30:05,708 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4727fac8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-12T16:30:05,708 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47db50b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/139f82cf-8370-d53f-65db-268928d5e290/hadoop.log.dir/,STOPPED} 2024-12-12T16:30:05,711 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-12T16:30:05,711 WARN [BP-1654502203-172.17.0.2-1734020805757 heartbeating to localhost/127.0.0.1:45065 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-12T16:30:05,711 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-12T16:30:05,711 WARN [BP-1654502203-172.17.0.2-1734020805757 heartbeating to localhost/127.0.0.1:45065 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1654502203-172.17.0.2-1734020805757 (Datanode Uuid ab0ae3d6-289e-4771-88b7-f95f718f4912) service to localhost/127.0.0.1:45065 2024-12-12T16:30:05,713 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/139f82cf-8370-d53f-65db-268928d5e290/cluster_c3fbcf95-de6c-01d4-3e6c-8a7f0857b2b0/dfs/data/data1/current/BP-1654502203-172.17.0.2-1734020805757 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-12T16:30:05,714 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/139f82cf-8370-d53f-65db-268928d5e290/cluster_c3fbcf95-de6c-01d4-3e6c-8a7f0857b2b0/dfs/data/data2/current/BP-1654502203-172.17.0.2-1734020805757 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-12T16:30:05,714 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-12T16:30:05,724 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@b03fcff{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-12T16:30:05,725 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-12T16:30:05,725 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-12T16:30:05,725 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-12T16:30:05,725 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/139f82cf-8370-d53f-65db-268928d5e290/hadoop.log.dir/,STOPPED} 2024-12-12T16:30:05,749 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-12-12T16:30:05,890 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down
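The shutdown sequence above ("Shutting down minicluster" through "Minicluster is down") is the standard HBaseTestingUtility teardown. As a point of reference, a minimal sketch of the kind of JUnit 4 harness that brackets such a run (an assumed shape, not the actual TestAcidGuaranteesWithAdaptivePolicy source):

// Illustrative sketch only: a typical HBaseTestingUtility-based test class whose
// teardown produces a shutdown trace like the one above.
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.junit.AfterClass;
import org.junit.BeforeClass;

public class MiniClusterHarnessSketch {
  // One shared minicluster per test class, as in the run logged above
  // (one master, one region server, one datanode, one ZooKeeper server).
  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  @BeforeClass
  public static void setUpCluster() throws Exception {
    TEST_UTIL.startMiniCluster();
  }

  @AfterClass
  public static void tearDownCluster() throws Exception {
    // Stops the region server and master (closing regions and flushing pending edits),
    // then shuts down the HDFS datanode/namenode web servers and the MiniZK cluster.
    TEST_UTIL.shutdownMiniCluster();
  }
}

The ResourceChecker entry earlier in the trace (Thread=245 was 244, OpenFileDescriptor=448 was 445) is this harness's per-test leak check, comparing thread and file-descriptor counts before and after each test method.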